//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
STATISTIC(NumFnNoUnwind, "Number of functions marked nounwind");

STATISTIC(NumFnUniqueReturned, "Number of functions with a unique return value");
STATISTIC(NumFnKnownReturns, "Number of functions with known return values");
STATISTIC(NumFnArgumentReturned,
          "Number of function arguments marked returned");
STATISTIC(NumFnNoSync, "Number of functions marked nosync");
STATISTIC(NumFnNoFree, "Number of functions marked nofree");
STATISTIC(NumFnReturnedNonNull,
          "Number of function return values marked nonnull");
STATISTIC(NumFnArgumentNonNull, "Number of function arguments marked nonnull");
STATISTIC(NumCSArgumentNonNull, "Number of call site arguments marked nonnull");
STATISTIC(NumFnWillReturn, "Number of functions marked willreturn");
STATISTIC(NumFnArgumentNoAlias, "Number of function arguments marked noalias");
STATISTIC(NumFnReturnedDereferenceable,
          "Number of function return values marked dereferenceable");
STATISTIC(NumFnArgumentDereferenceable,
          "Number of function arguments marked dereferenceable");
STATISTIC(NumCSArgumentDereferenceable,
          "Number of call site arguments marked dereferenceable");
STATISTIC(NumFnReturnedAlign, "Number of function return values marked align");
STATISTIC(NumFnArgumentAlign, "Number of function arguments marked align");
STATISTIC(NumCSArgumentAlign, "Number of call site arguments
marked align"); 78 79 // TODO: Determine a good default value. 80 // 81 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads 82 // (when run with the first 5 abstract attributes). The results also indicate 83 // that we never reach 32 iterations but always find a fixpoint sooner. 84 // 85 // This will become more evolved once we perform two interleaved fixpoint 86 // iterations: bottom-up and top-down. 87 static cl::opt<unsigned> 88 MaxFixpointIterations("attributor-max-iterations", cl::Hidden, 89 cl::desc("Maximal number of fixpoint iterations."), 90 cl::init(32)); 91 92 static cl::opt<bool> DisableAttributor( 93 "attributor-disable", cl::Hidden, 94 cl::desc("Disable the attributor inter-procedural deduction pass."), 95 cl::init(true)); 96 97 static cl::opt<bool> VerifyAttributor( 98 "attributor-verify", cl::Hidden, 99 cl::desc("Verify the Attributor deduction and " 100 "manifestation of attributes -- may issue false-positive errors"), 101 cl::init(false)); 102 103 /// Logic operators for the change status enum class. 104 /// 105 ///{ 106 ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) { 107 return l == ChangeStatus::CHANGED ? l : r; 108 } 109 ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) { 110 return l == ChangeStatus::UNCHANGED ? l : r; 111 } 112 ///} 113 114 /// Helper to adjust the statistics. 115 static void bookkeeping(AbstractAttribute::ManifestPosition MP, 116 const Attribute &Attr) { 117 if (!AreStatisticsEnabled()) 118 return; 119 120 switch (Attr.getKindAsEnum()) { 121 case Attribute::Alignment: 122 switch (MP) { 123 case AbstractAttribute::MP_RETURNED: 124 NumFnReturnedAlign++; 125 break; 126 case AbstractAttribute::MP_ARGUMENT: 127 NumFnArgumentAlign++; 128 break; 129 case AbstractAttribute::MP_CALL_SITE_ARGUMENT: 130 NumCSArgumentAlign++; 131 break; 132 default: 133 break; 134 } 135 break; 136 case Attribute::Dereferenceable: 137 switch (MP) { 138 case AbstractAttribute::MP_RETURNED: 139 NumFnReturnedDereferenceable++; 140 break; 141 case AbstractAttribute::MP_ARGUMENT: 142 NumFnArgumentDereferenceable++; 143 break; 144 case AbstractAttribute::MP_CALL_SITE_ARGUMENT: 145 NumCSArgumentDereferenceable++; 146 break; 147 default: 148 break; 149 } 150 break; 151 case Attribute::NoUnwind: 152 NumFnNoUnwind++; 153 return; 154 case Attribute::Returned: 155 NumFnArgumentReturned++; 156 return; 157 case Attribute::NoSync: 158 NumFnNoSync++; 159 break; 160 case Attribute::NoFree: 161 NumFnNoFree++; 162 break; 163 case Attribute::NonNull: 164 switch (MP) { 165 case AbstractAttribute::MP_RETURNED: 166 NumFnReturnedNonNull++; 167 break; 168 case AbstractAttribute::MP_ARGUMENT: 169 NumFnArgumentNonNull++; 170 break; 171 case AbstractAttribute::MP_CALL_SITE_ARGUMENT: 172 NumCSArgumentNonNull++; 173 break; 174 default: 175 break; 176 } 177 break; 178 case Attribute::WillReturn: 179 NumFnWillReturn++; 180 break; 181 case Attribute::NoAlias: 182 NumFnArgumentNoAlias++; 183 return; 184 default: 185 return; 186 } 187 } 188 189 template <typename StateTy> 190 using followValueCB_t = std::function<bool(Value *, StateTy &State)>; 191 template <typename StateTy> 192 using visitValueCB_t = std::function<void(Value *, StateTy &State)>; 193 194 /// Recursively visit all values that might become \p InitV at some point. This 195 /// will be done by looking through cast instructions, selects, phis, and calls 196 /// with the "returned" attribute. The callback \p FollowValueCB is asked before 197 /// a potential origin value is looked at. 
If no \p FollowValueCB is passed, a 198 /// default one is used that will make sure we visit every value only once. Once 199 /// we cannot look through the value any further, the callback \p VisitValueCB 200 /// is invoked and passed the current value and the \p State. To limit how much 201 /// effort is invested, we will never visit more than \p MaxValues values. 202 template <typename StateTy> 203 static bool genericValueTraversal( 204 Value *InitV, StateTy &State, visitValueCB_t<StateTy> &VisitValueCB, 205 followValueCB_t<StateTy> *FollowValueCB = nullptr, int MaxValues = 8) { 206 207 SmallPtrSet<Value *, 16> Visited; 208 followValueCB_t<bool> DefaultFollowValueCB = [&](Value *Val, bool &) { 209 return Visited.insert(Val).second; 210 }; 211 212 if (!FollowValueCB) 213 FollowValueCB = &DefaultFollowValueCB; 214 215 SmallVector<Value *, 16> Worklist; 216 Worklist.push_back(InitV); 217 218 int Iteration = 0; 219 do { 220 Value *V = Worklist.pop_back_val(); 221 222 // Check if we should process the current value. To prevent endless 223 // recursion keep a record of the values we followed! 224 if (!(*FollowValueCB)(V, State)) 225 continue; 226 227 // Make sure we limit the compile time for complex expressions. 228 if (Iteration++ >= MaxValues) 229 return false; 230 231 // Explicitly look through calls with a "returned" attribute if we do 232 // not have a pointer as stripPointerCasts only works on them. 233 if (V->getType()->isPointerTy()) { 234 V = V->stripPointerCasts(); 235 } else { 236 CallSite CS(V); 237 if (CS && CS.getCalledFunction()) { 238 Value *NewV = nullptr; 239 for (Argument &Arg : CS.getCalledFunction()->args()) 240 if (Arg.hasReturnedAttr()) { 241 NewV = CS.getArgOperand(Arg.getArgNo()); 242 break; 243 } 244 if (NewV) { 245 Worklist.push_back(NewV); 246 continue; 247 } 248 } 249 } 250 251 // Look through select instructions, visit both potential values. 252 if (auto *SI = dyn_cast<SelectInst>(V)) { 253 Worklist.push_back(SI->getTrueValue()); 254 Worklist.push_back(SI->getFalseValue()); 255 continue; 256 } 257 258 // Look through phi nodes, visit all operands. 259 if (auto *PHI = dyn_cast<PHINode>(V)) { 260 Worklist.append(PHI->op_begin(), PHI->op_end()); 261 continue; 262 } 263 264 // Once a leaf is reached we inform the user through the callback. 265 VisitValueCB(V, State); 266 } while (!Worklist.empty()); 267 268 // All values have been visited. 269 return true; 270 } 271 272 /// Helper to identify the correct offset into an attribute list. 273 static unsigned getAttrIndex(AbstractAttribute::ManifestPosition MP, 274 unsigned ArgNo = 0) { 275 switch (MP) { 276 case AbstractAttribute::MP_ARGUMENT: 277 case AbstractAttribute::MP_CALL_SITE_ARGUMENT: 278 return ArgNo + AttributeList::FirstArgIndex; 279 case AbstractAttribute::MP_FUNCTION: 280 return AttributeList::FunctionIndex; 281 case AbstractAttribute::MP_RETURNED: 282 return AttributeList::ReturnIndex; 283 } 284 llvm_unreachable("Unknown manifest position!"); 285 } 286 287 /// Return true if \p New is equal or worse than \p Old. 288 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) { 289 if (!Old.isIntAttribute()) 290 return true; 291 292 return Old.getValueAsInt() >= New.getValueAsInt(); 293 } 294 295 /// Return true if the information provided by \p Attr was added to the 296 /// attribute list \p Attrs. This is only the case if it was not already present 297 /// in \p Attrs at the position describe by \p MP and \p ArgNo. 
298 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr, 299 AttributeList &Attrs, 300 AbstractAttribute::ManifestPosition MP, 301 unsigned ArgNo = 0) { 302 unsigned AttrIdx = getAttrIndex(MP, ArgNo); 303 304 if (Attr.isEnumAttribute()) { 305 Attribute::AttrKind Kind = Attr.getKindAsEnum(); 306 if (Attrs.hasAttribute(AttrIdx, Kind)) 307 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind))) 308 return false; 309 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr); 310 return true; 311 } 312 if (Attr.isStringAttribute()) { 313 StringRef Kind = Attr.getKindAsString(); 314 if (Attrs.hasAttribute(AttrIdx, Kind)) 315 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind))) 316 return false; 317 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr); 318 return true; 319 } 320 if (Attr.isIntAttribute()) { 321 Attribute::AttrKind Kind = Attr.getKindAsEnum(); 322 if (Attrs.hasAttribute(AttrIdx, Kind)) 323 if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind))) 324 return false; 325 Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind); 326 Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr); 327 return true; 328 } 329 330 llvm_unreachable("Expected enum or string attribute!"); 331 } 332 333 ChangeStatus AbstractAttribute::update(Attributor &A) { 334 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 335 if (getState().isAtFixpoint()) 336 return HasChanged; 337 338 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n"); 339 340 HasChanged = updateImpl(A); 341 342 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this 343 << "\n"); 344 345 return HasChanged; 346 } 347 348 ChangeStatus AbstractAttribute::manifest(Attributor &A) { 349 assert(getState().isValidState() && 350 "Attempted to manifest an invalid state!"); 351 assert(getAssociatedValue() && 352 "Attempted to manifest an attribute without associated value!"); 353 354 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 355 SmallVector<Attribute, 4> DeducedAttrs; 356 getDeducedAttributes(DeducedAttrs); 357 358 Function &ScopeFn = getAnchorScope(); 359 LLVMContext &Ctx = ScopeFn.getContext(); 360 ManifestPosition MP = getManifestPosition(); 361 362 AttributeList Attrs; 363 SmallVector<unsigned, 4> ArgNos; 364 365 // In the following some generic code that will manifest attributes in 366 // DeducedAttrs if they improve the current IR. Due to the different 367 // annotation positions we use the underlying AttributeList interface. 368 // Note that MP_CALL_SITE_ARGUMENT can annotate multiple locations. 
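  // For example, if the same value feeds several operands of one call,
  // e.g. `call void @use(i8* %v, i8* %v)` (hypothetical IR), the
  // MP_CALL_SITE_ARGUMENT case below collects every matching operand index in
  // ArgNos so the deduced attribute is attached to each of those positions.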
369 370 switch (MP) { 371 case MP_ARGUMENT: 372 ArgNos.push_back(cast<Argument>(getAssociatedValue())->getArgNo()); 373 Attrs = ScopeFn.getAttributes(); 374 break; 375 case MP_FUNCTION: 376 case MP_RETURNED: 377 ArgNos.push_back(0); 378 Attrs = ScopeFn.getAttributes(); 379 break; 380 case MP_CALL_SITE_ARGUMENT: { 381 CallSite CS(&getAnchoredValue()); 382 for (unsigned u = 0, e = CS.getNumArgOperands(); u != e; u++) 383 if (CS.getArgOperand(u) == getAssociatedValue()) 384 ArgNos.push_back(u); 385 Attrs = CS.getAttributes(); 386 } 387 } 388 389 for (const Attribute &Attr : DeducedAttrs) { 390 for (unsigned ArgNo : ArgNos) { 391 if (!addIfNotExistent(Ctx, Attr, Attrs, MP, ArgNo)) 392 continue; 393 394 HasChanged = ChangeStatus::CHANGED; 395 bookkeeping(MP, Attr); 396 } 397 } 398 399 if (HasChanged == ChangeStatus::UNCHANGED) 400 return HasChanged; 401 402 switch (MP) { 403 case MP_ARGUMENT: 404 case MP_FUNCTION: 405 case MP_RETURNED: 406 ScopeFn.setAttributes(Attrs); 407 break; 408 case MP_CALL_SITE_ARGUMENT: 409 CallSite(&getAnchoredValue()).setAttributes(Attrs); 410 } 411 412 return HasChanged; 413 } 414 415 Function &AbstractAttribute::getAnchorScope() { 416 Value &V = getAnchoredValue(); 417 if (isa<Function>(V)) 418 return cast<Function>(V); 419 if (isa<Argument>(V)) 420 return *cast<Argument>(V).getParent(); 421 if (isa<Instruction>(V)) 422 return *cast<Instruction>(V).getFunction(); 423 llvm_unreachable("No scope for anchored value found!"); 424 } 425 426 const Function &AbstractAttribute::getAnchorScope() const { 427 return const_cast<AbstractAttribute *>(this)->getAnchorScope(); 428 } 429 430 // Helper function that returns argument index of value. 431 // If the value is not an argument, this returns -1. 432 static int getArgNo(Value &V) { 433 if (auto *Arg = dyn_cast<Argument>(&V)) 434 return Arg->getArgNo(); 435 return -1; 436 } 437 438 /// -----------------------NoUnwind Function Attribute-------------------------- 439 440 struct AANoUnwindFunction : AANoUnwind, BooleanState { 441 442 AANoUnwindFunction(Function &F, InformationCache &InfoCache) 443 : AANoUnwind(F, InfoCache) {} 444 445 /// See AbstractAttribute::getState() 446 /// { 447 AbstractState &getState() override { return *this; } 448 const AbstractState &getState() const override { return *this; } 449 /// } 450 451 /// See AbstractAttribute::getManifestPosition(). 452 ManifestPosition getManifestPosition() const override { return MP_FUNCTION; } 453 454 const std::string getAsStr() const override { 455 return getAssumed() ? "nounwind" : "may-unwind"; 456 } 457 458 /// See AbstractAttribute::updateImpl(...). 459 ChangeStatus updateImpl(Attributor &A) override; 460 461 /// See AANoUnwind::isAssumedNoUnwind(). 462 bool isAssumedNoUnwind() const override { return getAssumed(); } 463 464 /// See AANoUnwind::isKnownNoUnwind(). 465 bool isKnownNoUnwind() const override { return getKnown(); } 466 }; 467 468 ChangeStatus AANoUnwindFunction::updateImpl(Attributor &A) { 469 Function &F = getAnchorScope(); 470 471 // The map from instruction opcodes to those instructions in the function. 
472 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F); 473 auto Opcodes = { 474 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 475 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, 476 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; 477 478 for (unsigned Opcode : Opcodes) { 479 for (Instruction *I : OpcodeInstMap[Opcode]) { 480 if (!I->mayThrow()) 481 continue; 482 483 auto *NoUnwindAA = A.getAAFor<AANoUnwind>(*this, *I); 484 485 if (!NoUnwindAA || !NoUnwindAA->isAssumedNoUnwind()) { 486 indicatePessimisticFixpoint(); 487 return ChangeStatus::CHANGED; 488 } 489 } 490 } 491 return ChangeStatus::UNCHANGED; 492 } 493 494 /// --------------------- Function Return Values ------------------------------- 495 496 /// "Attribute" that collects all potential returned values and the return 497 /// instructions that they arise from. 498 /// 499 /// If there is a unique returned value R, the manifest method will: 500 /// - mark R with the "returned" attribute, if R is an argument. 501 class AAReturnedValuesImpl final : public AAReturnedValues, AbstractState { 502 503 /// Mapping of values potentially returned by the associated function to the 504 /// return instructions that might return them. 505 DenseMap<Value *, SmallPtrSet<ReturnInst *, 2>> ReturnedValues; 506 507 /// State flags 508 /// 509 ///{ 510 bool IsFixed; 511 bool IsValidState; 512 bool HasOverdefinedReturnedCalls; 513 ///} 514 515 /// Collect values that could become \p V in the set \p Values, each mapped to 516 /// \p ReturnInsts. 517 void collectValuesRecursively( 518 Attributor &A, Value *V, SmallPtrSetImpl<ReturnInst *> &ReturnInsts, 519 DenseMap<Value *, SmallPtrSet<ReturnInst *, 2>> &Values) { 520 521 visitValueCB_t<bool> VisitValueCB = [&](Value *Val, bool &) { 522 assert(!isa<Instruction>(Val) || 523 &getAnchorScope() == cast<Instruction>(Val)->getFunction()); 524 Values[Val].insert(ReturnInsts.begin(), ReturnInsts.end()); 525 }; 526 527 bool UnusedBool; 528 bool Success = genericValueTraversal(V, UnusedBool, VisitValueCB); 529 530 // If we did abort the above traversal we haven't see all the values. 531 // Consequently, we cannot know if the information we would derive is 532 // accurate so we give up early. 533 if (!Success) 534 indicatePessimisticFixpoint(); 535 } 536 537 public: 538 /// See AbstractAttribute::AbstractAttribute(...). 539 AAReturnedValuesImpl(Function &F, InformationCache &InfoCache) 540 : AAReturnedValues(F, InfoCache) { 541 // We do not have an associated argument yet. 542 AssociatedVal = nullptr; 543 } 544 545 /// See AbstractAttribute::initialize(...). 546 void initialize(Attributor &A) override { 547 // Reset the state. 548 AssociatedVal = nullptr; 549 IsFixed = false; 550 IsValidState = true; 551 HasOverdefinedReturnedCalls = false; 552 ReturnedValues.clear(); 553 554 Function &F = cast<Function>(getAnchoredValue()); 555 556 // The map from instruction opcodes to those instructions in the function. 557 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F); 558 559 // Look through all arguments, if one is marked as returned we are done. 
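    // For example, for `define i8* @f(i8* returned %p)` (hypothetical IR) the
    // argument %p is recorded as the returned value for every return
    // instruction and the optimistic fixpoint below is reached right away.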
560 for (Argument &Arg : F.args()) { 561 if (Arg.hasReturnedAttr()) { 562 563 auto &ReturnInstSet = ReturnedValues[&Arg]; 564 for (Instruction *RI : OpcodeInstMap[Instruction::Ret]) 565 ReturnInstSet.insert(cast<ReturnInst>(RI)); 566 567 indicateOptimisticFixpoint(); 568 return; 569 } 570 } 571 572 // If no argument was marked as returned we look at all return instructions 573 // and collect potentially returned values. 574 for (Instruction *RI : OpcodeInstMap[Instruction::Ret]) { 575 SmallPtrSet<ReturnInst *, 1> RISet({cast<ReturnInst>(RI)}); 576 collectValuesRecursively(A, cast<ReturnInst>(RI)->getReturnValue(), RISet, 577 ReturnedValues); 578 } 579 } 580 581 /// See AbstractAttribute::manifest(...). 582 ChangeStatus manifest(Attributor &A) override; 583 584 /// See AbstractAttribute::getState(...). 585 AbstractState &getState() override { return *this; } 586 587 /// See AbstractAttribute::getState(...). 588 const AbstractState &getState() const override { return *this; } 589 590 /// See AbstractAttribute::getManifestPosition(). 591 ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; } 592 593 /// See AbstractAttribute::updateImpl(Attributor &A). 594 ChangeStatus updateImpl(Attributor &A) override; 595 596 /// Return the number of potential return values, -1 if unknown. 597 size_t getNumReturnValues() const { 598 return isValidState() ? ReturnedValues.size() : -1; 599 } 600 601 /// Return an assumed unique return value if a single candidate is found. If 602 /// there cannot be one, return a nullptr. If it is not clear yet, return the 603 /// Optional::NoneType. 604 Optional<Value *> getAssumedUniqueReturnValue() const; 605 606 /// See AbstractState::checkForallReturnedValues(...). 607 bool 608 checkForallReturnedValues(std::function<bool(Value &)> &Pred) const override; 609 610 /// Pretty print the attribute similar to the IR representation. 611 const std::string getAsStr() const override; 612 613 /// See AbstractState::isAtFixpoint(). 614 bool isAtFixpoint() const override { return IsFixed; } 615 616 /// See AbstractState::isValidState(). 617 bool isValidState() const override { return IsValidState; } 618 619 /// See AbstractState::indicateOptimisticFixpoint(...). 620 void indicateOptimisticFixpoint() override { 621 IsFixed = true; 622 IsValidState &= true; 623 } 624 void indicatePessimisticFixpoint() override { 625 IsFixed = true; 626 IsValidState = false; 627 } 628 }; 629 630 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { 631 ChangeStatus Changed = ChangeStatus::UNCHANGED; 632 633 // Bookkeeping. 634 assert(isValidState()); 635 NumFnKnownReturns++; 636 637 // Check if we have an assumed unique return value that we could manifest. 638 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(); 639 640 if (!UniqueRV.hasValue() || !UniqueRV.getValue()) 641 return Changed; 642 643 // Bookkeeping. 644 NumFnUniqueReturned++; 645 646 // If the assumed unique return value is an argument, annotate it. 647 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { 648 AssociatedVal = UniqueRVArg; 649 Changed = AbstractAttribute::manifest(A) | Changed; 650 } 651 652 return Changed; 653 } 654 655 const std::string AAReturnedValuesImpl::getAsStr() const { 656 return (isAtFixpoint() ? "returns(#" : "may-return(#") + 657 (isValidState() ? 
std::to_string(getNumReturnValues()) : "?") + ")"; 658 } 659 660 Optional<Value *> AAReturnedValuesImpl::getAssumedUniqueReturnValue() const { 661 // If checkForallReturnedValues provides a unique value, ignoring potential 662 // undef values that can also be present, it is assumed to be the actual 663 // return value and forwarded to the caller of this method. If there are 664 // multiple, a nullptr is returned indicating there cannot be a unique 665 // returned value. 666 Optional<Value *> UniqueRV; 667 668 std::function<bool(Value &)> Pred = [&](Value &RV) -> bool { 669 // If we found a second returned value and neither the current nor the saved 670 // one is an undef, there is no unique returned value. Undefs are special 671 // since we can pretend they have any value. 672 if (UniqueRV.hasValue() && UniqueRV != &RV && 673 !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) { 674 UniqueRV = nullptr; 675 return false; 676 } 677 678 // Do not overwrite a value with an undef. 679 if (!UniqueRV.hasValue() || !isa<UndefValue>(RV)) 680 UniqueRV = &RV; 681 682 return true; 683 }; 684 685 if (!checkForallReturnedValues(Pred)) 686 UniqueRV = nullptr; 687 688 return UniqueRV; 689 } 690 691 bool AAReturnedValuesImpl::checkForallReturnedValues( 692 std::function<bool(Value &)> &Pred) const { 693 if (!isValidState()) 694 return false; 695 696 // Check all returned values but ignore call sites as long as we have not 697 // encountered an overdefined one during an update. 698 for (auto &It : ReturnedValues) { 699 Value *RV = It.first; 700 701 ImmutableCallSite ICS(RV); 702 if (ICS && !HasOverdefinedReturnedCalls) 703 continue; 704 705 if (!Pred(*RV)) 706 return false; 707 } 708 709 return true; 710 } 711 712 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { 713 714 // Check if we know of any values returned by the associated function, 715 // if not, we are done. 716 if (getNumReturnValues() == 0) { 717 indicateOptimisticFixpoint(); 718 return ChangeStatus::UNCHANGED; 719 } 720 721 // Check if any of the returned values is a call site we can refine. 722 decltype(ReturnedValues) AddRVs; 723 bool HasCallSite = false; 724 725 // Keep track of any change to trigger updates on dependent attributes. 726 ChangeStatus Changed = ChangeStatus::UNCHANGED; 727 728 // Look at all returned call sites. 729 for (auto &It : ReturnedValues) { 730 SmallPtrSet<ReturnInst *, 2> &ReturnInsts = It.second; 731 Value *RV = It.first; 732 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Potentially returned value " << *RV 733 << "\n"); 734 735 // Only call sites can change during an update, ignore the rest. 736 CallSite RetCS(RV); 737 if (!RetCS) 738 continue; 739 740 // For now, any call site we see will prevent us from directly fixing the 741 // state. However, if the information on the callees is fixed, the call 742 // sites will be removed and we will fix the information for this state. 743 HasCallSite = true; 744 745 // Try to find a assumed unique return value for the called function. 746 auto *RetCSAA = A.getAAFor<AAReturnedValuesImpl>(*this, *RV); 747 if (!RetCSAA) { 748 if (!HasOverdefinedReturnedCalls) 749 Changed = ChangeStatus::CHANGED; 750 HasOverdefinedReturnedCalls = true; 751 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned call site (" << *RV 752 << ") with " << (RetCSAA ? "invalid" : "no") 753 << " associated state\n"); 754 continue; 755 } 756 757 // Try to find a assumed unique return value for the called function. 
758 Optional<Value *> AssumedUniqueRV = RetCSAA->getAssumedUniqueReturnValue(); 759 760 // If no assumed unique return value was found due to the lack of 761 // candidates, we may need to resolve more calls (through more update 762 // iterations) or the called function will not return. Either way, we simply 763 // stick with the call sites as return values. Because there were not 764 // multiple possibilities, we do not treat it as overdefined. 765 if (!AssumedUniqueRV.hasValue()) 766 continue; 767 768 // If multiple, non-refinable values were found, there cannot be a unique 769 // return value for the called function. The returned call is overdefined! 770 if (!AssumedUniqueRV.getValue()) { 771 if (!HasOverdefinedReturnedCalls) 772 Changed = ChangeStatus::CHANGED; 773 HasOverdefinedReturnedCalls = true; 774 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned call site has multiple " 775 "potentially returned values\n"); 776 continue; 777 } 778 779 LLVM_DEBUG({ 780 bool UniqueRVIsKnown = RetCSAA->isAtFixpoint(); 781 dbgs() << "[AAReturnedValues] Returned call site " 782 << (UniqueRVIsKnown ? "known" : "assumed") 783 << " unique return value: " << *AssumedUniqueRV << "\n"; 784 }); 785 786 // The assumed unique return value. 787 Value *AssumedRetVal = AssumedUniqueRV.getValue(); 788 789 // If the assumed unique return value is an argument, lookup the matching 790 // call site operand and recursively collect new returned values. 791 // If it is not an argument, it is just put into the set of returned values 792 // as we would have already looked through casts, phis, and similar values. 793 if (Argument *AssumedRetArg = dyn_cast<Argument>(AssumedRetVal)) 794 collectValuesRecursively(A, 795 RetCS.getArgOperand(AssumedRetArg->getArgNo()), 796 ReturnInsts, AddRVs); 797 else 798 AddRVs[AssumedRetVal].insert(ReturnInsts.begin(), ReturnInsts.end()); 799 } 800 801 for (auto &It : AddRVs) { 802 assert(!It.second.empty() && "Entry does not add anything."); 803 auto &ReturnInsts = ReturnedValues[It.first]; 804 for (ReturnInst *RI : It.second) 805 if (ReturnInsts.insert(RI).second) { 806 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value " 807 << *It.first << " => " << *RI << "\n"); 808 Changed = ChangeStatus::CHANGED; 809 } 810 } 811 812 // If there is no call site in the returned values we are done. 813 if (!HasCallSite) { 814 indicateOptimisticFixpoint(); 815 return ChangeStatus::CHANGED; 816 } 817 818 return Changed; 819 } 820 821 /// ------------------------ NoSync Function Attribute ------------------------- 822 823 struct AANoSyncFunction : AANoSync, BooleanState { 824 825 AANoSyncFunction(Function &F, InformationCache &InfoCache) 826 : AANoSync(F, InfoCache) {} 827 828 /// See AbstractAttribute::getState() 829 /// { 830 AbstractState &getState() override { return *this; } 831 const AbstractState &getState() const override { return *this; } 832 /// } 833 834 /// See AbstractAttribute::getManifestPosition(). 835 ManifestPosition getManifestPosition() const override { return MP_FUNCTION; } 836 837 const std::string getAsStr() const override { 838 return getAssumed() ? "nosync" : "may-sync"; 839 } 840 841 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AANoSync::isAssumedNoSync()
  bool isAssumedNoSync() const override { return getAssumed(); }

  /// See AANoSync::isKnownNoSync()
  bool isKnownNoSync() const override { return getKnown(); }

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is neither unordered
  /// nor monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync (currently
  /// only memcpy, memmove, and memset are handled).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncFunction::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the cmpxchg be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncFunction::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered and are
    /// therefore nosync.
917 case Intrinsic::memset_element_unordered_atomic: 918 case Intrinsic::memmove_element_unordered_atomic: 919 case Intrinsic::memcpy_element_unordered_atomic: 920 return true; 921 case Intrinsic::memset: 922 case Intrinsic::memmove: 923 case Intrinsic::memcpy: 924 if (!cast<MemIntrinsic>(II)->isVolatile()) 925 return true; 926 return false; 927 default: 928 return false; 929 } 930 } 931 return false; 932 } 933 934 bool AANoSyncFunction::isVolatile(Instruction *I) { 935 assert(!ImmutableCallSite(I) && !isa<CallBase>(I) && 936 "Calls should not be checked here"); 937 938 switch (I->getOpcode()) { 939 case Instruction::AtomicRMW: 940 return cast<AtomicRMWInst>(I)->isVolatile(); 941 case Instruction::Store: 942 return cast<StoreInst>(I)->isVolatile(); 943 case Instruction::Load: 944 return cast<LoadInst>(I)->isVolatile(); 945 case Instruction::AtomicCmpXchg: 946 return cast<AtomicCmpXchgInst>(I)->isVolatile(); 947 default: 948 return false; 949 } 950 } 951 952 ChangeStatus AANoSyncFunction::updateImpl(Attributor &A) { 953 Function &F = getAnchorScope(); 954 955 /// We are looking for volatile instructions or Non-Relaxed atomics. 956 /// FIXME: We should ipmrove the handling of intrinsics. 957 for (Instruction *I : InfoCache.getReadOrWriteInstsForFunction(F)) { 958 ImmutableCallSite ICS(I); 959 auto *NoSyncAA = A.getAAFor<AANoSyncFunction>(*this, *I); 960 961 if (isa<IntrinsicInst>(I) && isNoSyncIntrinsic(I)) 962 continue; 963 964 if (ICS && (!NoSyncAA || !NoSyncAA->isAssumedNoSync()) && 965 !ICS.hasFnAttr(Attribute::NoSync)) { 966 indicatePessimisticFixpoint(); 967 return ChangeStatus::CHANGED; 968 } 969 970 if (ICS) 971 continue; 972 973 if (!isVolatile(I) && !isNonRelaxedAtomic(I)) 974 continue; 975 976 indicatePessimisticFixpoint(); 977 return ChangeStatus::CHANGED; 978 } 979 980 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F); 981 auto Opcodes = {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 982 (unsigned)Instruction::Call}; 983 984 for (unsigned Opcode : Opcodes) { 985 for (Instruction *I : OpcodeInstMap[Opcode]) { 986 // At this point we handled all read/write effects and they are all 987 // nosync, so they can be skipped. 988 if (I->mayReadOrWriteMemory()) 989 continue; 990 991 ImmutableCallSite ICS(I); 992 993 // non-convergent and readnone imply nosync. 994 if (!ICS.isConvergent()) 995 continue; 996 997 indicatePessimisticFixpoint(); 998 return ChangeStatus::CHANGED; 999 } 1000 } 1001 1002 return ChangeStatus::UNCHANGED; 1003 } 1004 1005 /// ------------------------ No-Free Attributes ---------------------------- 1006 1007 struct AANoFreeFunction : AbstractAttribute, BooleanState { 1008 1009 /// See AbstractAttribute::AbstractAttribute(...). 1010 AANoFreeFunction(Function &F, InformationCache &InfoCache) 1011 : AbstractAttribute(F, InfoCache) {} 1012 1013 /// See AbstractAttribute::getState() 1014 ///{ 1015 AbstractState &getState() override { return *this; } 1016 const AbstractState &getState() const override { return *this; } 1017 ///} 1018 1019 /// See AbstractAttribute::getManifestPosition(). 1020 ManifestPosition getManifestPosition() const override { return MP_FUNCTION; } 1021 1022 /// See AbstractAttribute::getAsStr(). 1023 const std::string getAsStr() const override { 1024 return getAssumed() ? "nofree" : "may-free"; 1025 } 1026 1027 /// See AbstractAttribute::updateImpl(...). 1028 ChangeStatus updateImpl(Attributor &A) override; 1029 1030 /// See AbstractAttribute::getAttrKind(). 
1031 Attribute::AttrKind getAttrKind() const override { return ID; } 1032 1033 /// Return true if "nofree" is assumed. 1034 bool isAssumedNoFree() const { return getAssumed(); } 1035 1036 /// Return true if "nofree" is known. 1037 bool isKnownNoFree() const { return getKnown(); } 1038 1039 /// The identifier used by the Attributor for this class of attributes. 1040 static constexpr Attribute::AttrKind ID = Attribute::NoFree; 1041 }; 1042 1043 ChangeStatus AANoFreeFunction::updateImpl(Attributor &A) { 1044 Function &F = getAnchorScope(); 1045 1046 // The map from instruction opcodes to those instructions in the function. 1047 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F); 1048 1049 for (unsigned Opcode : 1050 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 1051 (unsigned)Instruction::Call}) { 1052 for (Instruction *I : OpcodeInstMap[Opcode]) { 1053 1054 auto ICS = ImmutableCallSite(I); 1055 auto *NoFreeAA = A.getAAFor<AANoFreeFunction>(*this, *I); 1056 1057 if ((!NoFreeAA || !NoFreeAA->isAssumedNoFree()) && 1058 !ICS.hasFnAttr(Attribute::NoFree)) { 1059 indicatePessimisticFixpoint(); 1060 return ChangeStatus::CHANGED; 1061 } 1062 } 1063 } 1064 return ChangeStatus::UNCHANGED; 1065 } 1066 1067 /// ------------------------ NonNull Argument Attribute ------------------------ 1068 struct AANonNullImpl : AANonNull, BooleanState { 1069 1070 AANonNullImpl(Value &V, InformationCache &InfoCache) 1071 : AANonNull(V, InfoCache) {} 1072 1073 AANonNullImpl(Value *AssociatedVal, Value &AnchoredValue, 1074 InformationCache &InfoCache) 1075 : AANonNull(AssociatedVal, AnchoredValue, InfoCache) {} 1076 1077 /// See AbstractAttribute::getState() 1078 /// { 1079 AbstractState &getState() override { return *this; } 1080 const AbstractState &getState() const override { return *this; } 1081 /// } 1082 1083 /// See AbstractAttribute::getAsStr(). 1084 const std::string getAsStr() const override { 1085 return getAssumed() ? "nonnull" : "may-null"; 1086 } 1087 1088 /// See AANonNull::isAssumedNonNull(). 1089 bool isAssumedNonNull() const override { return getAssumed(); } 1090 1091 /// See AANonNull::isKnownNonNull(). 1092 bool isKnownNonNull() const override { return getKnown(); } 1093 1094 /// Generate a predicate that checks if a given value is assumed nonnull. 1095 /// The generated function returns true if a value satisfies any of 1096 /// following conditions. 1097 /// (i) A value is known nonZero(=nonnull). 1098 /// (ii) A value is associated with AANonNull and its isAssumedNonNull() is 1099 /// true. 1100 std::function<bool(Value &)> generatePredicate(Attributor &); 1101 }; 1102 1103 std::function<bool(Value &)> AANonNullImpl::generatePredicate(Attributor &A) { 1104 // FIXME: The `AAReturnedValues` should provide the predicate with the 1105 // `ReturnInst` vector as well such that we can use the control flow sensitive 1106 // version of `isKnownNonZero`. This should fix `test11` in 1107 // `test/Transforms/FunctionAttrs/nonnull.ll` 1108 1109 std::function<bool(Value &)> Pred = [&](Value &RV) -> bool { 1110 if (isKnownNonZero(&RV, getAnchorScope().getParent()->getDataLayout())) 1111 return true; 1112 1113 auto *NonNullAA = A.getAAFor<AANonNull>(*this, RV); 1114 1115 ImmutableCallSite ICS(&RV); 1116 1117 if ((!NonNullAA || !NonNullAA->isAssumedNonNull()) && 1118 (!ICS || !ICS.hasRetAttr(Attribute::NonNull))) 1119 return false; 1120 1121 return true; 1122 }; 1123 1124 return Pred; 1125 } 1126 1127 /// NonNull attribute for function return value. 
1128 struct AANonNullReturned : AANonNullImpl { 1129 1130 AANonNullReturned(Function &F, InformationCache &InfoCache) 1131 : AANonNullImpl(F, InfoCache) {} 1132 1133 /// See AbstractAttribute::getManifestPosition(). 1134 ManifestPosition getManifestPosition() const override { return MP_RETURNED; } 1135 1136 /// See AbstractAttriubute::initialize(...). 1137 void initialize(Attributor &A) override { 1138 Function &F = getAnchorScope(); 1139 1140 // Already nonnull. 1141 if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex, 1142 Attribute::NonNull) || 1143 F.getAttributes().hasAttribute(AttributeList::ReturnIndex, 1144 Attribute::Dereferenceable)) 1145 indicateOptimisticFixpoint(); 1146 } 1147 1148 /// See AbstractAttribute::updateImpl(...). 1149 ChangeStatus updateImpl(Attributor &A) override; 1150 }; 1151 1152 ChangeStatus AANonNullReturned::updateImpl(Attributor &A) { 1153 Function &F = getAnchorScope(); 1154 1155 auto *AARetVal = A.getAAFor<AAReturnedValues>(*this, F); 1156 if (!AARetVal) { 1157 indicatePessimisticFixpoint(); 1158 return ChangeStatus::CHANGED; 1159 } 1160 1161 std::function<bool(Value &)> Pred = this->generatePredicate(A); 1162 if (!AARetVal->checkForallReturnedValues(Pred)) { 1163 indicatePessimisticFixpoint(); 1164 return ChangeStatus::CHANGED; 1165 } 1166 return ChangeStatus::UNCHANGED; 1167 } 1168 1169 /// NonNull attribute for function argument. 1170 struct AANonNullArgument : AANonNullImpl { 1171 1172 AANonNullArgument(Argument &A, InformationCache &InfoCache) 1173 : AANonNullImpl(A, InfoCache) {} 1174 1175 /// See AbstractAttribute::getManifestPosition(). 1176 ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; } 1177 1178 /// See AbstractAttriubute::initialize(...). 1179 void initialize(Attributor &A) override { 1180 Argument *Arg = cast<Argument>(getAssociatedValue()); 1181 if (Arg->hasNonNullAttr()) 1182 indicateOptimisticFixpoint(); 1183 } 1184 1185 /// See AbstractAttribute::updateImpl(...). 1186 ChangeStatus updateImpl(Attributor &A) override; 1187 }; 1188 1189 /// NonNull attribute for a call site argument. 1190 struct AANonNullCallSiteArgument : AANonNullImpl { 1191 1192 /// See AANonNullImpl::AANonNullImpl(...). 1193 AANonNullCallSiteArgument(CallSite CS, unsigned ArgNo, 1194 InformationCache &InfoCache) 1195 : AANonNullImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(), InfoCache), 1196 ArgNo(ArgNo) {} 1197 1198 /// See AbstractAttribute::initialize(...). 1199 void initialize(Attributor &A) override { 1200 CallSite CS(&getAnchoredValue()); 1201 if (CS.paramHasAttr(ArgNo, getAttrKind()) || 1202 CS.paramHasAttr(ArgNo, Attribute::Dereferenceable) || 1203 isKnownNonZero(getAssociatedValue(), 1204 getAnchorScope().getParent()->getDataLayout())) 1205 indicateOptimisticFixpoint(); 1206 } 1207 1208 /// See AbstractAttribute::updateImpl(Attributor &A). 1209 ChangeStatus updateImpl(Attributor &A) override; 1210 1211 /// See AbstractAttribute::getManifestPosition(). 1212 ManifestPosition getManifestPosition() const override { 1213 return MP_CALL_SITE_ARGUMENT; 1214 }; 1215 1216 // Return argument index of associated value. 
1217 int getArgNo() const { return ArgNo; } 1218 1219 private: 1220 unsigned ArgNo; 1221 }; 1222 ChangeStatus AANonNullArgument::updateImpl(Attributor &A) { 1223 Function &F = getAnchorScope(); 1224 Argument &Arg = cast<Argument>(getAnchoredValue()); 1225 1226 unsigned ArgNo = Arg.getArgNo(); 1227 1228 // Callback function 1229 std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) { 1230 assert(CS && "Sanity check: Call site was not initialized properly!"); 1231 1232 auto *NonNullAA = A.getAAFor<AANonNull>(*this, *CS.getInstruction(), ArgNo); 1233 1234 // Check that NonNullAA is AANonNullCallSiteArgument. 1235 if (NonNullAA) { 1236 ImmutableCallSite ICS(&NonNullAA->getAnchoredValue()); 1237 if (ICS && CS.getInstruction() == ICS.getInstruction()) 1238 return NonNullAA->isAssumedNonNull(); 1239 return false; 1240 } 1241 1242 if (CS.paramHasAttr(ArgNo, Attribute::NonNull)) 1243 return true; 1244 1245 Value *V = CS.getArgOperand(ArgNo); 1246 if (isKnownNonZero(V, getAnchorScope().getParent()->getDataLayout())) 1247 return true; 1248 1249 return false; 1250 }; 1251 if (!A.checkForAllCallSites(F, CallSiteCheck, true)) { 1252 indicatePessimisticFixpoint(); 1253 return ChangeStatus::CHANGED; 1254 } 1255 return ChangeStatus::UNCHANGED; 1256 } 1257 1258 ChangeStatus AANonNullCallSiteArgument::updateImpl(Attributor &A) { 1259 // NOTE: Never look at the argument of the callee in this method. 1260 // If we do this, "nonnull" is always deduced because of the assumption. 1261 1262 Value &V = *getAssociatedValue(); 1263 1264 auto *NonNullAA = A.getAAFor<AANonNull>(*this, V); 1265 1266 if (!NonNullAA || !NonNullAA->isAssumedNonNull()) { 1267 indicatePessimisticFixpoint(); 1268 return ChangeStatus::CHANGED; 1269 } 1270 1271 return ChangeStatus::UNCHANGED; 1272 } 1273 1274 /// ------------------------ Will-Return Attributes ---------------------------- 1275 1276 struct AAWillReturnImpl : public AAWillReturn, BooleanState { 1277 1278 /// See AbstractAttribute::AbstractAttribute(...). 1279 AAWillReturnImpl(Function &F, InformationCache &InfoCache) 1280 : AAWillReturn(F, InfoCache) {} 1281 1282 /// See AAWillReturn::isKnownWillReturn(). 1283 bool isKnownWillReturn() const override { return getKnown(); } 1284 1285 /// See AAWillReturn::isAssumedWillReturn(). 1286 bool isAssumedWillReturn() const override { return getAssumed(); } 1287 1288 /// See AbstractAttribute::getState(...). 1289 AbstractState &getState() override { return *this; } 1290 1291 /// See AbstractAttribute::getState(...). 1292 const AbstractState &getState() const override { return *this; } 1293 1294 /// See AbstractAttribute::getAsStr() 1295 const std::string getAsStr() const override { 1296 return getAssumed() ? "willreturn" : "may-noreturn"; 1297 } 1298 }; 1299 1300 struct AAWillReturnFunction final : AAWillReturnImpl { 1301 1302 /// See AbstractAttribute::AbstractAttribute(...). 1303 AAWillReturnFunction(Function &F, InformationCache &InfoCache) 1304 : AAWillReturnImpl(F, InfoCache) {} 1305 1306 /// See AbstractAttribute::getManifestPosition(). 1307 ManifestPosition getManifestPosition() const override { return MP_FUNCTION; } 1308 1309 /// See AbstractAttribute::initialize(...). 1310 void initialize(Attributor &A) override; 1311 1312 /// See AbstractAttribute::updateImpl(...). 1313 ChangeStatus updateImpl(Attributor &A) override; 1314 }; 1315 1316 // Helper function that checks whether a function has any cycle. 
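// Note that flagging any edge to an already-visited block is conservative: a
// DFS over an acyclic CFG with a merge point (e.g. an if/else diamond) may
// also be reported as containing a cycle. That only makes the willreturn
// deduction more pessimistic, never incorrect.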
// TODO: Replace with more efficient code
bool containsCycle(Function &F) {
  SmallPtrSet<BasicBlock *, 32> Visited;

  // Traverse the blocks via DFS and check whether a successor is already
  // visited.
  for (BasicBlock *BB : depth_first(&F)) {
    Visited.insert(BB);
    for (auto *SuccBB : successors(BB)) {
      if (Visited.count(SuccBB))
        return true;
    }
  }
  return false;
}

// Helper function that checks whether the function has a loop which might
// become an endless loop.
// FIXME: Any cycle is regarded as an endless loop for now.
//        We have to allow some patterns.
bool containsPossiblyEndlessLoop(Function &F) { return containsCycle(F); }

void AAWillReturnFunction::initialize(Attributor &A) {
  Function &F = getAnchorScope();

  if (containsPossiblyEndlessLoop(F))
    indicatePessimisticFixpoint();
}

ChangeStatus AAWillReturnFunction::updateImpl(Attributor &A) {
  Function &F = getAnchorScope();

  // The map from instruction opcodes to those instructions in the function.
  auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);

  for (unsigned Opcode :
       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call}) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      auto ICS = ImmutableCallSite(I);

      if (ICS.hasFnAttr(Attribute::WillReturn))
        continue;

      auto *WillReturnAA = A.getAAFor<AAWillReturn>(*this, *I);
      if (!WillReturnAA || !WillReturnAA->isAssumedWillReturn()) {
        indicatePessimisticFixpoint();
        return ChangeStatus::CHANGED;
      }

      auto *NoRecurseAA = A.getAAFor<AANoRecurse>(*this, *I);

      // FIXME: (i) Prohibit any recursion for now.
      //        (ii) AANoRecurse isn't implemented yet so currently any call is
      //             regarded as having recursion.
      //        Code below should be
      //          if ((!NoRecurseAA || !NoRecurseAA->isAssumedNoRecurse()) &&
      if (!NoRecurseAA && !ICS.hasFnAttr(Attribute::NoRecurse)) {
        indicatePessimisticFixpoint();
        return ChangeStatus::CHANGED;
      }
    }
  }

  return ChangeStatus::UNCHANGED;
}

/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias, BooleanState {

  AANoAliasImpl(Value &V, InformationCache &InfoCache)
      : AANoAlias(V, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }

  /// See AANoAlias::isAssumedNoAlias().
  bool isAssumedNoAlias() const override { return getAssumed(); }

  /// See AANoAlias::isKnownNoAlias().
  bool isKnownNoAlias() const override { return getKnown(); }
};

/// NoAlias attribute for function return value.
struct AANoAliasReturned : AANoAliasImpl {

  AANoAliasReturned(Function &F, InformationCache &InfoCache)
      : AANoAliasImpl(F, InfoCache) {}

  /// See AbstractAttribute::getManifestPosition().
  virtual ManifestPosition getManifestPosition() const override {
    return MP_RETURNED;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Function &F = getAnchorScope();

    // Already noalias.
1423 if (F.returnDoesNotAlias()) { 1424 indicateOptimisticFixpoint(); 1425 return; 1426 } 1427 } 1428 1429 /// See AbstractAttribute::updateImpl(...). 1430 virtual ChangeStatus updateImpl(Attributor &A) override; 1431 }; 1432 1433 ChangeStatus AANoAliasReturned::updateImpl(Attributor &A) { 1434 Function &F = getAnchorScope(); 1435 1436 auto *AARetValImpl = A.getAAFor<AAReturnedValuesImpl>(*this, F); 1437 if (!AARetValImpl) { 1438 indicatePessimisticFixpoint(); 1439 return ChangeStatus::CHANGED; 1440 } 1441 1442 std::function<bool(Value &)> Pred = [&](Value &RV) -> bool { 1443 if (Constant *C = dyn_cast<Constant>(&RV)) 1444 if (C->isNullValue() || isa<UndefValue>(C)) 1445 return true; 1446 1447 /// For now, we can only deduce noalias if we have call sites. 1448 /// FIXME: add more support. 1449 ImmutableCallSite ICS(&RV); 1450 if (!ICS) 1451 return false; 1452 1453 auto *NoAliasAA = A.getAAFor<AANoAlias>(*this, RV); 1454 1455 if (!ICS.returnDoesNotAlias() && 1456 (!NoAliasAA || !NoAliasAA->isAssumedNoAlias())) 1457 return false; 1458 1459 /// FIXME: We can improve capture check in two ways: 1460 /// 1. Use the AANoCapture facilities. 1461 /// 2. Use the location of return insts for escape queries. 1462 if (PointerMayBeCaptured(&RV, /* ReturnCaptures */ false, 1463 /* StoreCaptures */ true)) 1464 return false; 1465 1466 return true; 1467 }; 1468 1469 if (!AARetValImpl->checkForallReturnedValues(Pred)) { 1470 indicatePessimisticFixpoint(); 1471 return ChangeStatus::CHANGED; 1472 } 1473 1474 return ChangeStatus::UNCHANGED; 1475 } 1476 1477 /// -------------------AAIsDead Function Attribute----------------------- 1478 1479 struct AAIsDeadFunction : AAIsDead, BooleanState { 1480 1481 AAIsDeadFunction(Function &F, InformationCache &InfoCache) 1482 : AAIsDead(F, InfoCache) {} 1483 1484 /// See AbstractAttribute::getState() 1485 /// { 1486 AbstractState &getState() override { return *this; } 1487 const AbstractState &getState() const override { return *this; } 1488 /// } 1489 1490 /// See AbstractAttribute::getManifestPosition(). 1491 ManifestPosition getManifestPosition() const override { return MP_FUNCTION; } 1492 1493 void initialize(Attributor &A) override { 1494 Function &F = getAnchorScope(); 1495 1496 ToBeExploredPaths.insert(&(F.getEntryBlock().front())); 1497 AssumedLiveBlocks.insert(&(F.getEntryBlock())); 1498 for (size_t i = 0; i < ToBeExploredPaths.size(); ++i) 1499 explorePath(A, ToBeExploredPaths[i]); 1500 } 1501 1502 /// Explores new instructions starting from \p I. If instruction is dead, stop 1503 /// and return true if it discovered a new instruction. 1504 bool explorePath(Attributor &A, Instruction *I); 1505 1506 const std::string getAsStr() const override { 1507 return "LiveBBs(" + std::to_string(AssumedLiveBlocks.size()) + "/" + 1508 std::to_string(getAnchorScope().size()) + ")"; 1509 } 1510 1511 /// See AbstractAttribute::manifest(...). 1512 ChangeStatus manifest(Attributor &A) override { 1513 assert(getState().isValidState() && 1514 "Attempted to manifest an invalid state!"); 1515 1516 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 1517 1518 for (Instruction *I : NoReturnCalls) { 1519 BasicBlock *BB = I->getParent(); 1520 1521 /// Invoke is replaced with a call and unreachable is placed after it. 
1522 if (auto *II = dyn_cast<InvokeInst>(I)) { 1523 changeToCall(II); 1524 changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false); 1525 LLVM_DEBUG(dbgs() << "[AAIsDead] Replaced invoke with call inst\n"); 1526 continue; 1527 } 1528 1529 SplitBlock(BB, I->getNextNode()); 1530 changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false); 1531 HasChanged = ChangeStatus::CHANGED; 1532 } 1533 1534 return HasChanged; 1535 } 1536 1537 /// See AbstractAttribute::updateImpl(...). 1538 ChangeStatus updateImpl(Attributor &A) override; 1539 1540 /// See AAIsDead::isAssumedDead(). 1541 bool isAssumedDead(BasicBlock *BB) const override { 1542 if (!getAssumed()) 1543 return false; 1544 return !AssumedLiveBlocks.count(BB); 1545 } 1546 1547 /// See AAIsDead::isKnownDead(). 1548 bool isKnownDead(BasicBlock *BB) const override { 1549 if (!getKnown()) 1550 return false; 1551 return !AssumedLiveBlocks.count(BB); 1552 } 1553 1554 /// Collection of to be explored paths. 1555 SmallSetVector<Instruction *, 8> ToBeExploredPaths; 1556 1557 /// Collection of all assumed live BasicBlocks. 1558 DenseSet<BasicBlock *> AssumedLiveBlocks; 1559 1560 /// Collection of calls with noreturn attribute, assumed or knwon. 1561 SmallSetVector<Instruction *, 4> NoReturnCalls; 1562 }; 1563 1564 bool AAIsDeadFunction::explorePath(Attributor &A, Instruction *I) { 1565 BasicBlock *BB = I->getParent(); 1566 1567 while (I) { 1568 ImmutableCallSite ICS(I); 1569 1570 if (ICS) { 1571 auto *NoReturnAA = A.getAAFor<AANoReturn>(*this, *I); 1572 1573 if (NoReturnAA && NoReturnAA->isAssumedNoReturn()) { 1574 if (!NoReturnCalls.insert(I)) 1575 // If I is already in the NoReturnCalls set, then it stayed noreturn 1576 // and we didn't discover any new instructions. 1577 return false; 1578 1579 // Discovered new noreturn call, return true to indicate that I is not 1580 // noreturn anymore and should be deleted from NoReturnCalls. 1581 return true; 1582 } 1583 1584 if (ICS.hasFnAttr(Attribute::NoReturn)) { 1585 if (!NoReturnCalls.insert(I)) 1586 return false; 1587 1588 return true; 1589 } 1590 } 1591 1592 I = I->getNextNode(); 1593 } 1594 1595 // get new paths (reachable blocks). 1596 for (BasicBlock *SuccBB : successors(BB)) { 1597 Instruction *Inst = &(SuccBB->front()); 1598 AssumedLiveBlocks.insert(SuccBB); 1599 ToBeExploredPaths.insert(Inst); 1600 } 1601 1602 return true; 1603 } 1604 1605 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 1606 // Temporary collection to iterate over existing noreturn instructions. This 1607 // will alow easier modification of NoReturnCalls collection 1608 SmallVector<Instruction *, 8> NoReturnChanged; 1609 ChangeStatus Status = ChangeStatus::UNCHANGED; 1610 1611 for (Instruction *I : NoReturnCalls) 1612 NoReturnChanged.push_back(I); 1613 1614 for (Instruction *I : NoReturnChanged) { 1615 size_t Size = ToBeExploredPaths.size(); 1616 1617 // Still noreturn. 1618 if (!explorePath(A, I)) 1619 continue; 1620 1621 NoReturnCalls.remove(I); 1622 1623 // No new paths. 1624 if (Size == ToBeExploredPaths.size()) 1625 continue; 1626 1627 // At least one new path. 1628 Status = ChangeStatus::CHANGED; 1629 1630 // explore new paths. 
    while (Size != ToBeExploredPaths.size())
      explorePath(A, ToBeExploredPaths[Size++]);
  }

  LLVM_DEBUG(
      dbgs() << "[AAIsDead] AssumedLiveBlocks: " << AssumedLiveBlocks.size()
             << " Total number of blocks: " << getAnchorScope().size() << "\n");

  return Status;
}

/// -------------------- Dereferenceable Argument Attribute --------------------

struct DerefState : AbstractState {

  /// State representing the dereferenceable bytes.
  IntegerState DerefBytesState;

  /// State representing whether the value is nonnull or global.
  IntegerState NonNullGlobalState;

  /// Bits encoding for NonNullGlobalState.
  enum {
    DEREF_NONNULL = 1 << 0,
    DEREF_GLOBAL = 1 << 1,
  };

  /// See AbstractState::isValidState()
  bool isValidState() const override { return DerefBytesState.isValidState(); }

  // See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override {
    return DerefBytesState.isAtFixpoint() && NonNullGlobalState.isAtFixpoint();
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  void indicateOptimisticFixpoint() override {
    DerefBytesState.indicateOptimisticFixpoint();
    NonNullGlobalState.indicateOptimisticFixpoint();
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  void indicatePessimisticFixpoint() override {
    DerefBytesState.indicatePessimisticFixpoint();
    NonNullGlobalState.indicatePessimisticFixpoint();
  }

  /// Update known dereferenceable bytes.
  void takeKnownDerefBytesMaximum(uint64_t Bytes) {
    DerefBytesState.takeKnownMaximum(Bytes);
  }

  /// Update assumed dereferenceable bytes.
  void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
    DerefBytesState.takeAssumedMinimum(Bytes);
  }

  /// Update the assumed NonNullGlobalState.
  void updateAssumedNonNullGlobalState(bool IsNonNull, bool IsGlobal) {
    if (!IsNonNull)
      NonNullGlobalState.removeAssumedBits(DEREF_NONNULL);
    if (!IsGlobal)
      NonNullGlobalState.removeAssumedBits(DEREF_GLOBAL);
  }

  /// Equality for DerefState.
  bool operator==(const DerefState &R) {
    return this->DerefBytesState == R.DerefBytesState &&
           this->NonNullGlobalState == R.NonNullGlobalState;
  }
};

struct AADereferenceableImpl : AADereferenceable, DerefState {

  AADereferenceableImpl(Value &V, InformationCache &InfoCache)
      : AADereferenceable(V, InfoCache) {}

  AADereferenceableImpl(Value *AssociatedVal, Value &AnchoredValue,
                        InformationCache &InfoCache)
      : AADereferenceable(AssociatedVal, AnchoredValue, InfoCache) {}

  /// See AbstractAttribute::getState()
  /// {
  AbstractState &getState() override { return *this; }
  const AbstractState &getState() const override { return *this; }
  /// }

  /// See AADereferenceable::getAssumedDereferenceableBytes().
  uint32_t getAssumedDereferenceableBytes() const override {
    return DerefBytesState.getAssumed();
  }

  /// See AADereferenceable::getKnownDereferenceableBytes().
  uint32_t getKnownDereferenceableBytes() const override {
    return DerefBytesState.getKnown();
  }

  // Helper function for syncing nonnull state.
1728 void syncNonNull(const AANonNull *NonNullAA) { 1729 if (!NonNullAA) { 1730 NonNullGlobalState.removeAssumedBits(DEREF_NONNULL); 1731 return; 1732 } 1733 1734 if (NonNullAA->isKnownNonNull()) 1735 NonNullGlobalState.addKnownBits(DEREF_NONNULL); 1736 1737 if (!NonNullAA->isAssumedNonNull()) 1738 NonNullGlobalState.removeAssumedBits(DEREF_NONNULL); 1739 } 1740 1741 /// See AADereferenceable::isAssumedGlobal(). 1742 bool isAssumedGlobal() const override { 1743 return NonNullGlobalState.isAssumed(DEREF_GLOBAL); 1744 } 1745 1746 /// See AADereferenceable::isKnownGlobal(). 1747 bool isKnownGlobal() const override { 1748 return NonNullGlobalState.isKnown(DEREF_GLOBAL); 1749 } 1750 1751 /// See AADereferenceable::isAssumedNonNull(). 1752 bool isAssumedNonNull() const override { 1753 return NonNullGlobalState.isAssumed(DEREF_NONNULL); 1754 } 1755 1756 /// See AADereferenceable::isKnownNonNull(). 1757 bool isKnownNonNull() const override { 1758 return NonNullGlobalState.isKnown(DEREF_NONNULL); 1759 } 1760 1761 void getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const override { 1762 LLVMContext &Ctx = AnchoredVal.getContext(); 1763 1764 // TODO: Add *_globally support 1765 if (isAssumedNonNull()) 1766 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 1767 Ctx, getAssumedDereferenceableBytes())); 1768 else 1769 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 1770 Ctx, getAssumedDereferenceableBytes())); 1771 } 1772 uint64_t computeAssumedDerefenceableBytes(Attributor &A, Value &V, 1773 bool &IsNonNull, bool &IsGlobal); 1774 1775 void initialize(Attributor &A) override { 1776 Function &F = getAnchorScope(); 1777 unsigned AttrIdx = 1778 getAttrIndex(getManifestPosition(), getArgNo(getAnchoredValue())); 1779 1780 for (Attribute::AttrKind AK : 1781 {Attribute::Dereferenceable, Attribute::DereferenceableOrNull}) 1782 if (F.getAttributes().hasAttribute(AttrIdx, AK)) 1783 takeKnownDerefBytesMaximum(F.getAttribute(AttrIdx, AK).getValueAsInt()); 1784 } 1785 1786 /// See AbstractAttribute::getAsStr(). 1787 const std::string getAsStr() const override { 1788 if (!getAssumedDereferenceableBytes()) 1789 return "unknown-dereferenceable"; 1790 return std::string("dereferenceable") + 1791 (isAssumedNonNull() ? "" : "_or_null") + 1792 (isAssumedGlobal() ? "_globally" : "") + "<" + 1793 std::to_string(getKnownDereferenceableBytes()) + "-" + 1794 std::to_string(getAssumedDereferenceableBytes()) + ">"; 1795 } 1796 }; 1797 1798 struct AADereferenceableReturned : AADereferenceableImpl { 1799 AADereferenceableReturned(Function &F, InformationCache &InfoCache) 1800 : AADereferenceableImpl(F, InfoCache) {} 1801 1802 /// See AbstractAttribute::getManifestPosition(). 1803 ManifestPosition getManifestPosition() const override { return MP_RETURNED; } 1804 1805 /// See AbstractAttribute::updateImpl(...). 1806 ChangeStatus updateImpl(Attributor &A) override; 1807 }; 1808 1809 // Helper function that returns dereferenceable bytes. 1810 static uint64_t calcDifferenceIfBaseIsNonNull(int64_t DerefBytes, 1811 int64_t Offset, bool IsNonNull) { 1812 if (!IsNonNull) 1813 return 0; 1814 return std::max((int64_t)0, DerefBytes - Offset); 1815 } 1816 1817 uint64_t AADereferenceableImpl::computeAssumedDerefenceableBytes( 1818 Attributor &A, Value &V, bool &IsNonNull, bool &IsGlobal) { 1819 // TODO: Tracking the globally flag. 1820 IsGlobal = false; 1821 1822 // First, we try to get information about V from Attributor. 
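// For illustration (hypothetical IR): for
//   %p = getelementptr inbounds i8, i8* %base, i64 4
// with %base assumed dereferenceable(12), the base-pointer reasoning below
// would assume dereferenceable(8) for %p.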
1823 if (auto *DerefAA = A.getAAFor<AADereferenceable>(*this, V)) { 1824 IsNonNull &= DerefAA->isAssumedNonNull(); 1825 return DerefAA->getAssumedDereferenceableBytes(); 1826 } 1827 1828 // Otherwise, we try to compute assumed bytes from base pointer. 1829 const DataLayout &DL = getAnchorScope().getParent()->getDataLayout(); 1830 unsigned IdxWidth = 1831 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 1832 APInt Offset(IdxWidth, 0); 1833 Value *Base = V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset); 1834 1835 if (auto *BaseDerefAA = A.getAAFor<AADereferenceable>(*this, *Base)) { 1836 IsNonNull &= Offset != 0; 1837 return calcDifferenceIfBaseIsNonNull( 1838 BaseDerefAA->getAssumedDereferenceableBytes(), Offset.getSExtValue(), 1839 Offset != 0 || BaseDerefAA->isAssumedNonNull()); 1840 } 1841 1842 // Then, use IR information. 1843 1844 if (isDereferenceablePointer(Base, Base->getType(), DL)) 1845 return calcDifferenceIfBaseIsNonNull( 1846 DL.getTypeStoreSize(Base->getType()->getPointerElementType()), 1847 Offset.getSExtValue(), 1848 !NullPointerIsDefined(&getAnchorScope(), 1849 V.getType()->getPointerAddressSpace())); 1850 1851 IsNonNull = false; 1852 return 0; 1853 } 1854 ChangeStatus AADereferenceableReturned::updateImpl(Attributor &A) { 1855 Function &F = getAnchorScope(); 1856 auto BeforeState = static_cast<DerefState>(*this); 1857 1858 syncNonNull(A.getAAFor<AANonNull>(*this, F)); 1859 1860 auto *AARetVal = A.getAAFor<AAReturnedValues>(*this, F); 1861 if (!AARetVal) { 1862 indicatePessimisticFixpoint(); 1863 return ChangeStatus::CHANGED; 1864 } 1865 1866 bool IsNonNull = isAssumedNonNull(); 1867 bool IsGlobal = isAssumedGlobal(); 1868 1869 std::function<bool(Value &)> Pred = [&](Value &RV) -> bool { 1870 takeAssumedDerefBytesMinimum( 1871 computeAssumedDerefenceableBytes(A, RV, IsNonNull, IsGlobal)); 1872 return isValidState(); 1873 }; 1874 1875 if (AARetVal->checkForallReturnedValues(Pred)) { 1876 updateAssumedNonNullGlobalState(IsNonNull, IsGlobal); 1877 return BeforeState == static_cast<DerefState>(*this) 1878 ? ChangeStatus::UNCHANGED 1879 : ChangeStatus::CHANGED; 1880 } 1881 indicatePessimisticFixpoint(); 1882 return ChangeStatus::CHANGED; 1883 } 1884 1885 struct AADereferenceableArgument : AADereferenceableImpl { 1886 AADereferenceableArgument(Argument &A, InformationCache &InfoCache) 1887 : AADereferenceableImpl(A, InfoCache) {} 1888 1889 /// See AbstractAttribute::getManifestPosition(). 1890 ManifestPosition getManifestPosition() const override { return MP_ARGUMENT; } 1891 1892 /// See AbstractAttribute::updateImpl(...). 1893 ChangeStatus updateImpl(Attributor &A) override; 1894 }; 1895 1896 ChangeStatus AADereferenceableArgument::updateImpl(Attributor &A) { 1897 Function &F = getAnchorScope(); 1898 Argument &Arg = cast<Argument>(getAnchoredValue()); 1899 1900 auto BeforeState = static_cast<DerefState>(*this); 1901 1902 unsigned ArgNo = Arg.getArgNo(); 1903 1904 syncNonNull(A.getAAFor<AANonNull>(*this, F, ArgNo)); 1905 1906 bool IsNonNull = isAssumedNonNull(); 1907 bool IsGlobal = isAssumedGlobal(); 1908 1909 // Callback function 1910 std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) -> bool { 1911 assert(CS && "Sanity check: Call site was not initialized properly!"); 1912 1913 // Check that DereferenceableAA is AADereferenceableCallSiteArgument. 
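// The lookup may return an abstract attribute anchored at a different
// position; comparing the anchored call site instruction ensures we only
// reuse a call site argument attribute for this particular call.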
1914 if (auto *DereferenceableAA = 1915 A.getAAFor<AADereferenceable>(*this, *CS.getInstruction(), ArgNo)) { 1916 ImmutableCallSite ICS(&DereferenceableAA->getAnchoredValue()); 1917 if (ICS && CS.getInstruction() == ICS.getInstruction()) { 1918 takeAssumedDerefBytesMinimum( 1919 DereferenceableAA->getAssumedDereferenceableBytes()); 1920 IsNonNull &= DereferenceableAA->isAssumedNonNull(); 1921 IsGlobal &= DereferenceableAA->isAssumedGlobal(); 1922 return isValidState(); 1923 } 1924 } 1925 1926 takeAssumedDerefBytesMinimum(computeAssumedDerefenceableBytes( 1927 A, *CS.getArgOperand(ArgNo), IsNonNull, IsGlobal)); 1928 1929 return isValidState(); 1930 }; 1931 1932 if (!A.checkForAllCallSites(F, CallSiteCheck, true)) { 1933 indicatePessimisticFixpoint(); 1934 return ChangeStatus::CHANGED; 1935 } 1936 1937 updateAssumedNonNullGlobalState(IsNonNull, IsGlobal); 1938 1939 return BeforeState == static_cast<DerefState>(*this) ? ChangeStatus::UNCHANGED 1940 : ChangeStatus::CHANGED; 1941 } 1942 1943 /// Dereferenceable attribute for a call site argument. 1944 struct AADereferenceableCallSiteArgument : AADereferenceableImpl { 1945 1946 /// See AADereferenceableImpl::AADereferenceableImpl(...). 1947 AADereferenceableCallSiteArgument(CallSite CS, unsigned ArgNo, 1948 InformationCache &InfoCache) 1949 : AADereferenceableImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(), 1950 InfoCache), 1951 ArgNo(ArgNo) {} 1952 1953 /// See AbstractAttribute::initialize(...). 1954 void initialize(Attributor &A) override { 1955 CallSite CS(&getAnchoredValue()); 1956 if (CS.paramHasAttr(ArgNo, Attribute::Dereferenceable)) 1957 takeKnownDerefBytesMaximum(CS.getDereferenceableBytes(ArgNo)); 1958 1959 if (CS.paramHasAttr(ArgNo, Attribute::DereferenceableOrNull)) 1960 takeKnownDerefBytesMaximum(CS.getDereferenceableOrNullBytes(ArgNo)); 1961 } 1962 1963 /// See AbstractAttribute::updateImpl(Attributor &A). 1964 ChangeStatus updateImpl(Attributor &A) override; 1965 1966 /// See AbstractAttribute::getManifestPosition(). 1967 ManifestPosition getManifestPosition() const override { 1968 return MP_CALL_SITE_ARGUMENT; 1969 }; 1970 1971 // Return argument index of associated value. 1972 int getArgNo() const { return ArgNo; } 1973 1974 private: 1975 unsigned ArgNo; 1976 }; 1977 1978 ChangeStatus AADereferenceableCallSiteArgument::updateImpl(Attributor &A) { 1979 // NOTE: Never look at the argument of the callee in this method. 1980 // If we do this, "dereferenceable" is always deduced because of the 1981 // assumption. 1982 1983 Value &V = *getAssociatedValue(); 1984 1985 auto BeforeState = static_cast<DerefState>(*this); 1986 1987 syncNonNull(A.getAAFor<AANonNull>(*this, getAnchoredValue(), ArgNo)); 1988 bool IsNonNull = isAssumedNonNull(); 1989 bool IsGlobal = isKnownGlobal(); 1990 1991 takeAssumedDerefBytesMinimum( 1992 computeAssumedDerefenceableBytes(A, V, IsNonNull, IsGlobal)); 1993 updateAssumedNonNullGlobalState(IsNonNull, IsGlobal); 1994 1995 return BeforeState == static_cast<DerefState>(*this) ? 
ChangeStatus::UNCHANGED
1996 : ChangeStatus::CHANGED;
1997 }
1998
1999 // ------------------------ Align Argument Attribute ------------------------
2000
2001 struct AAAlignImpl : AAAlign, IntegerState {
2002
2003 // Maximum alignment value allowed in IR.
2004 static const unsigned MAX_ALIGN = 1U << 29;
2005
2006 AAAlignImpl(Value *AssociatedVal, Value &AnchoredValue,
2007 InformationCache &InfoCache)
2008 : AAAlign(AssociatedVal, AnchoredValue, InfoCache),
2009 IntegerState(MAX_ALIGN) {}
2010
2011 AAAlignImpl(Value &V, InformationCache &InfoCache)
2012 : AAAlignImpl(&V, V, InfoCache) {}
2013
2014 /// See AbstractAttribute::getState()
2015 /// {
2016 AbstractState &getState() override { return *this; }
2017 const AbstractState &getState() const override { return *this; }
2018 /// }
2019
2020 virtual const std::string getAsStr() const override {
2021 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
2022 "-" + std::to_string(getAssumedAlign()) + ">")
2023 : "unknown-align";
2024 }
2025
2026 /// See AAAlign::getAssumedAlign().
2027 unsigned getAssumedAlign() const override { return getAssumed(); }
2028
2029 /// See AAAlign::getKnownAlign().
2030 unsigned getKnownAlign() const override { return getKnown(); }
2031
2032 /// See AbstractAttribute::initialize(...).
2033 void initialize(Attributor &A) override {
2034 Function &F = getAnchorScope();
2035
2036 unsigned AttrIdx =
2037 getAttrIndex(getManifestPosition(), getArgNo(getAnchoredValue()));
2038
2039 // If the function already has an align attribute on the return value or argument, use it as the known alignment.
2040 if (F.getAttributes().hasAttribute(AttrIdx, ID))
2041 addKnownBits(F.getAttribute(AttrIdx, ID).getAlignment());
2042 }
2043
2044 /// See AbstractAttribute::getDeducedAttributes(...).
2045 virtual void
2046 getDeducedAttributes(SmallVectorImpl<Attribute> &Attrs) const override {
2047 LLVMContext &Ctx = AnchoredVal.getContext();
2048
2049 Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
2050 }
2051 };
2052
2053 /// Align attribute for function return value.
2054 struct AAAlignReturned : AAAlignImpl {
2055
2056 AAAlignReturned(Function &F, InformationCache &InfoCache)
2057 : AAAlignImpl(F, InfoCache) {}
2058
2059 /// See AbstractAttribute::getManifestPosition().
2060 virtual ManifestPosition getManifestPosition() const override {
2061 return MP_RETURNED;
2062 }
2063
2064 /// See AbstractAttribute::updateImpl(...).
2065 virtual ChangeStatus updateImpl(Attributor &A) override;
2066 };
2067
2068 ChangeStatus AAAlignReturned::updateImpl(Attributor &A) {
2069 Function &F = getAnchorScope();
2070 auto *AARetValImpl = A.getAAFor<AAReturnedValuesImpl>(*this, F);
2071 if (!AARetValImpl) {
2072 indicatePessimisticFixpoint();
2073 return ChangeStatus::CHANGED;
2074 }
2075
2076 // Currently, align<n> is deduced if the alignment of every returned value is
2077 // assumed to be at least n. We reach a pessimistic fixpoint if any of the
2078 // returned values does not have an alignment. If no assumed state was used
2079 // for reasoning, an optimistic fixpoint is reached earlier.
2080
2081 base_t BeforeState = getAssumed();
2082 std::function<bool(Value &)> Pred = [&](Value &RV) -> bool {
2083 auto *AlignAA = A.getAAFor<AAAlign>(*this, RV);
2084
2085 if (AlignAA)
2086 takeAssumedMinimum(AlignAA->getAssumedAlign());
2087 else
2088 // Use IR information.
2089 takeAssumedMinimum(RV.getPointerAlignment( 2090 getAnchorScope().getParent()->getDataLayout())); 2091 2092 return isValidState(); 2093 }; 2094 2095 if (!AARetValImpl->checkForallReturnedValues(Pred)) { 2096 indicatePessimisticFixpoint(); 2097 return ChangeStatus::CHANGED; 2098 } 2099 2100 return (getAssumed() != BeforeState) ? ChangeStatus::CHANGED 2101 : ChangeStatus::UNCHANGED; 2102 } 2103 2104 /// Align attribute for function argument. 2105 struct AAAlignArgument : AAAlignImpl { 2106 2107 AAAlignArgument(Argument &A, InformationCache &InfoCache) 2108 : AAAlignImpl(A, InfoCache) {} 2109 2110 /// See AbstractAttribute::getManifestPosition(). 2111 virtual ManifestPosition getManifestPosition() const override { 2112 return MP_ARGUMENT; 2113 } 2114 2115 /// See AbstractAttribute::updateImpl(...). 2116 virtual ChangeStatus updateImpl(Attributor &A) override; 2117 }; 2118 2119 ChangeStatus AAAlignArgument::updateImpl(Attributor &A) { 2120 2121 Function &F = getAnchorScope(); 2122 Argument &Arg = cast<Argument>(getAnchoredValue()); 2123 2124 unsigned ArgNo = Arg.getArgNo(); 2125 const DataLayout &DL = F.getParent()->getDataLayout(); 2126 2127 auto BeforeState = getAssumed(); 2128 2129 // Callback function 2130 std::function<bool(CallSite)> CallSiteCheck = [&](CallSite CS) { 2131 assert(CS && "Sanity check: Call site was not initialized properly!"); 2132 2133 auto *AlignAA = A.getAAFor<AAAlign>(*this, *CS.getInstruction(), ArgNo); 2134 2135 // Check that AlignAA is AAAlignCallSiteArgument. 2136 if (AlignAA) { 2137 ImmutableCallSite ICS(&AlignAA->getAnchoredValue()); 2138 if (ICS && CS.getInstruction() == ICS.getInstruction()) { 2139 takeAssumedMinimum(AlignAA->getAssumedAlign()); 2140 return isValidState(); 2141 } 2142 } 2143 2144 Value *V = CS.getArgOperand(ArgNo); 2145 takeAssumedMinimum(V->getPointerAlignment(DL)); 2146 return isValidState(); 2147 }; 2148 2149 if (!A.checkForAllCallSites(F, CallSiteCheck, true)) 2150 indicatePessimisticFixpoint(); 2151 2152 return BeforeState == getAssumed() ? ChangeStatus::UNCHANGED 2153 : ChangeStatus ::CHANGED; 2154 } 2155 2156 struct AAAlignCallSiteArgument : AAAlignImpl { 2157 2158 /// See AANonNullImpl::AANonNullImpl(...). 2159 AAAlignCallSiteArgument(CallSite CS, unsigned ArgNo, 2160 InformationCache &InfoCache) 2161 : AAAlignImpl(CS.getArgOperand(ArgNo), *CS.getInstruction(), InfoCache), 2162 ArgNo(ArgNo) {} 2163 2164 /// See AbstractAttribute::initialize(...). 2165 void initialize(Attributor &A) override { 2166 CallSite CS(&getAnchoredValue()); 2167 takeKnownMaximum(getAssociatedValue()->getPointerAlignment( 2168 getAnchorScope().getParent()->getDataLayout())); 2169 } 2170 2171 /// See AbstractAttribute::updateImpl(Attributor &A). 2172 ChangeStatus updateImpl(Attributor &A) override; 2173 2174 /// See AbstractAttribute::getManifestPosition(). 2175 ManifestPosition getManifestPosition() const override { 2176 return MP_CALL_SITE_ARGUMENT; 2177 }; 2178 2179 // Return argument index of associated value. 2180 int getArgNo() const { return ArgNo; } 2181 2182 private: 2183 unsigned ArgNo; 2184 }; 2185 2186 ChangeStatus AAAlignCallSiteArgument::updateImpl(Attributor &A) { 2187 // NOTE: Never look at the argument of the callee in this method. 2188 // If we do this, "align" is always deduced because of the assumption. 
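// For illustration (hypothetical IR): with an internal function
//   define internal void @f(i32* %arg)
// called as `call void @f(i32* %p)`, the alignment of %arg is itself derived
// from the call sites, so querying it here would let the optimistic
// assumptions of the two attributes justify each other.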
2189
2190 auto BeforeState = getAssumed();
2191
2192 Value &V = *getAssociatedValue();
2193
2194 auto *AlignAA = A.getAAFor<AAAlign>(*this, V);
2195
2196 if (AlignAA)
2197 takeAssumedMinimum(AlignAA->getAssumedAlign());
2198 else
2199 indicatePessimisticFixpoint();
2200
2201 return BeforeState == getAssumed() ? ChangeStatus::UNCHANGED
2202 : ChangeStatus::CHANGED;
2203 }
2204
2205 /// ----------------------------------------------------------------------------
2206 /// Attributor
2207 /// ----------------------------------------------------------------------------
2208
2209 bool Attributor::checkForAllCallSites(Function &F,
2210 std::function<bool(CallSite)> &Pred,
2211 bool RequireAllCallSites) {
2212 // We can try to determine information from the call sites. However, this is
2213 // only possible if all call sites are known, which in turn requires the
2214 // function to have internal linkage.
2215 if (RequireAllCallSites && !F.hasInternalLinkage()) {
2216 LLVM_DEBUG(
2217 dbgs()
2218 << "Attributor: Function " << F.getName()
2219 << " has no internal linkage, hence not all call sites are known\n");
2220 return false;
2221 }
2222
2223 for (const Use &U : F.uses()) {
2224
2225 CallSite CS(U.getUser());
2226 if (!CS || !CS.isCallee(&U) || !CS.getCaller()->hasExactDefinition()) {
2227 if (!RequireAllCallSites)
2228 continue;
2229
2230 LLVM_DEBUG(dbgs() << "Attributor: User " << *U.getUser()
2231 << " is an invalid use of " << F.getName() << "\n");
2232 return false;
2233 }
2234
2235 if (Pred(CS))
2236 continue;
2237
2238 LLVM_DEBUG(dbgs() << "Attributor: Call site callback failed for "
2239 << *CS.getInstruction() << "\n");
2240 return false;
2241 }
2242
2243 return true;
2244 }
2245
2246 ChangeStatus Attributor::run() {
2247 // Initialize all abstract attributes.
2248 for (AbstractAttribute *AA : AllAbstractAttributes)
2249 AA->initialize(*this);
2250
2251 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2252 << AllAbstractAttributes.size()
2253 << " abstract attributes.\n");
2254
2255 // Now that all abstract attributes are collected and initialized we start
2256 // the abstract analysis.
2257
2258 unsigned IterationCounter = 1;
2259
2260 SmallVector<AbstractAttribute *, 64> ChangedAAs;
2261 SetVector<AbstractAttribute *> Worklist;
2262 Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
2263
2264 do {
2265 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2266 << ", Worklist size: " << Worklist.size() << "\n");
2267
2268 // Add all abstract attributes that are potentially dependent on one that
2269 // changed to the work list.
2270 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2271 auto &QuerriedAAs = QueryMap[ChangedAA];
2272 Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
2273 }
2274
2275 // Reset the changed set.
2276 ChangedAAs.clear();
2277
2278 // Update all abstract attributes in the work list and record the ones that
2279 // changed.
2280 for (AbstractAttribute *AA : Worklist)
2281 if (AA->update(*this) == ChangeStatus::CHANGED)
2282 ChangedAAs.push_back(AA);
2283
2284 // Reset the work list and repopulate with the changed abstract attributes.
2285 // Note that dependent ones are added above.
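// For example, if an AAAlignReturned attribute queried the AAAlign of a
// returned value and that value's attribute changed in this iteration, the
// QueryMap lookup at the top of the next iteration re-adds the querying
// attribute to the work list.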
2286 Worklist.clear(); 2287 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end()); 2288 2289 } while (!Worklist.empty() && ++IterationCounter < MaxFixpointIterations); 2290 2291 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: " 2292 << IterationCounter << "/" << MaxFixpointIterations 2293 << " iterations\n"); 2294 2295 bool FinishedAtFixpoint = Worklist.empty(); 2296 2297 // Reset abstract arguments not settled in a sound fixpoint by now. This 2298 // happens when we stopped the fixpoint iteration early. Note that only the 2299 // ones marked as "changed" *and* the ones transitively depending on them 2300 // need to be reverted to a pessimistic state. Others might not be in a 2301 // fixpoint state but we can use the optimistic results for them anyway. 2302 SmallPtrSet<AbstractAttribute *, 32> Visited; 2303 for (unsigned u = 0; u < ChangedAAs.size(); u++) { 2304 AbstractAttribute *ChangedAA = ChangedAAs[u]; 2305 if (!Visited.insert(ChangedAA).second) 2306 continue; 2307 2308 AbstractState &State = ChangedAA->getState(); 2309 if (!State.isAtFixpoint()) { 2310 State.indicatePessimisticFixpoint(); 2311 2312 NumAttributesTimedOut++; 2313 } 2314 2315 auto &QuerriedAAs = QueryMap[ChangedAA]; 2316 ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end()); 2317 } 2318 2319 LLVM_DEBUG({ 2320 if (!Visited.empty()) 2321 dbgs() << "\n[Attributor] Finalized " << Visited.size() 2322 << " abstract attributes.\n"; 2323 }); 2324 2325 unsigned NumManifested = 0; 2326 unsigned NumAtFixpoint = 0; 2327 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED; 2328 for (AbstractAttribute *AA : AllAbstractAttributes) { 2329 AbstractState &State = AA->getState(); 2330 2331 // If there is not already a fixpoint reached, we can now take the 2332 // optimistic state. This is correct because we enforced a pessimistic one 2333 // on abstract attributes that were transitively dependent on a changed one 2334 // already above. 2335 if (!State.isAtFixpoint()) 2336 State.indicateOptimisticFixpoint(); 2337 2338 // If the state is invalid, we do not try to manifest it. 2339 if (!State.isValidState()) 2340 continue; 2341 2342 // Manifest the state and record if we changed the IR. 2343 ChangeStatus LocalChange = AA->manifest(*this); 2344 ManifestChange = ManifestChange | LocalChange; 2345 2346 NumAtFixpoint++; 2347 NumManifested += (LocalChange == ChangeStatus::CHANGED); 2348 } 2349 2350 (void)NumManifested; 2351 (void)NumAtFixpoint; 2352 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested 2353 << " arguments while " << NumAtFixpoint 2354 << " were in a valid fixpoint state\n"); 2355 2356 // If verification is requested, we finished this run at a fixpoint, and the 2357 // IR was changed, we re-run the whole fixpoint analysis, starting at 2358 // re-initialization of the arguments. This re-run should not result in an IR 2359 // change. Though, the (virtual) state of attributes at the end of the re-run 2360 // might be more optimistic than the known state or the IR state if the better 2361 // state cannot be manifested. 2362 if (VerifyAttributor && FinishedAtFixpoint && 2363 ManifestChange == ChangeStatus::CHANGED) { 2364 VerifyAttributor = false; 2365 ChangeStatus VerifyStatus = run(); 2366 if (VerifyStatus != ChangeStatus::UNCHANGED) 2367 llvm_unreachable( 2368 "Attributor verification failed, re-run did result in an IR change " 2369 "even after a fixpoint was reached in the original run. 
(False " 2370 "positives possible!)"); 2371 VerifyAttributor = true; 2372 } 2373 2374 NumAttributesManifested += NumManifested; 2375 NumAttributesValidFixpoint += NumAtFixpoint; 2376 2377 return ManifestChange; 2378 } 2379 2380 void Attributor::identifyDefaultAbstractAttributes( 2381 Function &F, InformationCache &InfoCache, 2382 DenseSet</* Attribute::AttrKind */ unsigned> *Whitelist) { 2383 2384 // Every function can be nounwind. 2385 registerAA(*new AANoUnwindFunction(F, InfoCache)); 2386 2387 // Every function might be marked "nosync" 2388 registerAA(*new AANoSyncFunction(F, InfoCache)); 2389 2390 // Every function might be "no-free". 2391 registerAA(*new AANoFreeFunction(F, InfoCache)); 2392 2393 // Return attributes are only appropriate if the return type is non void. 2394 Type *ReturnType = F.getReturnType(); 2395 if (!ReturnType->isVoidTy()) { 2396 // Argument attribute "returned" --- Create only one per function even 2397 // though it is an argument attribute. 2398 if (!Whitelist || Whitelist->count(AAReturnedValues::ID)) 2399 registerAA(*new AAReturnedValuesImpl(F, InfoCache)); 2400 2401 if (ReturnType->isPointerTy()) { 2402 // Every function with pointer return type might be marked align. 2403 if (!Whitelist || Whitelist->count(AAAlignReturned::ID)) 2404 registerAA(*new AAAlignReturned(F, InfoCache)); 2405 2406 // Every function with pointer return type might be marked nonnull. 2407 if (!Whitelist || Whitelist->count(AANonNullReturned::ID)) 2408 registerAA(*new AANonNullReturned(F, InfoCache)); 2409 2410 // Every function with pointer return type might be marked noalias. 2411 if (!Whitelist || Whitelist->count(AANoAliasReturned::ID)) 2412 registerAA(*new AANoAliasReturned(F, InfoCache)); 2413 2414 // Every function with pointer return type might be marked 2415 // dereferenceable. 2416 if (ReturnType->isPointerTy() && 2417 (!Whitelist || Whitelist->count(AADereferenceableReturned::ID))) 2418 registerAA(*new AADereferenceableReturned(F, InfoCache)); 2419 } 2420 } 2421 2422 for (Argument &Arg : F.args()) { 2423 if (Arg.getType()->isPointerTy()) { 2424 // Every argument with pointer type might be marked nonnull. 2425 if (!Whitelist || Whitelist->count(AANonNullArgument::ID)) 2426 registerAA(*new AANonNullArgument(Arg, InfoCache)); 2427 2428 // Every argument with pointer type might be marked dereferenceable. 2429 if (!Whitelist || Whitelist->count(AADereferenceableArgument::ID)) 2430 registerAA(*new AADereferenceableArgument(Arg, InfoCache)); 2431 2432 // Every argument with pointer type might be marked align. 2433 if (!Whitelist || Whitelist->count(AAAlignArgument::ID)) 2434 registerAA(*new AAAlignArgument(Arg, InfoCache)); 2435 } 2436 } 2437 2438 // Every function might be "will-return". 2439 registerAA(*new AAWillReturnFunction(F, InfoCache)); 2440 2441 // Check for dead BasicBlocks in every function. 2442 registerAA(*new AAIsDeadFunction(F, InfoCache)); 2443 2444 // Walk all instructions to find more attribute opportunities and also 2445 // interesting instructions that might be queried by abstract attributes 2446 // during their initialization or update. 2447 auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F]; 2448 auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F]; 2449 2450 for (Instruction &I : instructions(&F)) { 2451 bool IsInterestingOpcode = false; 2452 2453 // To allow easy access to all instructions in a function with a given 2454 // opcode we store them in the InfoCache. 
As not all opcodes are interesting
2455 // to concrete attributes we only cache the ones identified in
2456 // the following switch.
2457 // Note: More opcodes can be added here as concrete attributes require them.
2458 switch (I.getOpcode()) {
2459 default:
2460 assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
2461 "New call site/base instruction type needs to be known in the "
2462 "attributor.");
2463 break;
2464 case Instruction::Call:
2465 case Instruction::CallBr:
2466 case Instruction::Invoke:
2467 case Instruction::CleanupRet:
2468 case Instruction::CatchSwitch:
2469 case Instruction::Resume:
2470 case Instruction::Ret:
2471 IsInterestingOpcode = true;
2472 }
2473 if (IsInterestingOpcode)
2474 InstOpcodeMap[I.getOpcode()].push_back(&I);
2475 if (I.mayReadOrWriteMemory())
2476 ReadOrWriteInsts.push_back(&I);
2477
2478 CallSite CS(&I);
2479 if (CS && CS.getCalledFunction()) {
2480 for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
2481 if (!CS.getArgument(i)->getType()->isPointerTy())
2482 continue;
2483
2484 // Call site argument attribute "non-null".
2485 if (!Whitelist || Whitelist->count(AANonNullCallSiteArgument::ID))
2486 registerAA(*new AANonNullCallSiteArgument(CS, i, InfoCache), i);
2487
2488 // Call site argument attribute "dereferenceable".
2489 if (!Whitelist ||
2490 Whitelist->count(AADereferenceableCallSiteArgument::ID))
2491 registerAA(*new AADereferenceableCallSiteArgument(CS, i, InfoCache),
2492 i);
2493
2494 // Call site argument attribute "align".
2495 if (!Whitelist || Whitelist->count(AAAlignCallSiteArgument::ID))
2496 registerAA(*new AAAlignCallSiteArgument(CS, i, InfoCache), i);
2497 }
2498 }
2499 }
2500 }
2501
2502 /// Helpers to ease debugging through output streams and print calls.
2503 ///
2504 ///{
2505 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
2506 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
2507 }
2508
2509 raw_ostream &llvm::operator<<(raw_ostream &OS,
2510 AbstractAttribute::ManifestPosition AP) {
2511 switch (AP) {
2512 case AbstractAttribute::MP_ARGUMENT:
2513 return OS << "arg";
2514 case AbstractAttribute::MP_CALL_SITE_ARGUMENT:
2515 return OS << "cs_arg";
2516 case AbstractAttribute::MP_FUNCTION:
2517 return OS << "fn";
2518 case AbstractAttribute::MP_RETURNED:
2519 return OS << "fn_ret";
2520 }
2521 llvm_unreachable("Unknown attribute position!");
2522 }
2523
2524 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
2525 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
2526 }
2527
2528 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
2529 AA.print(OS);
2530 return OS;
2531 }
2532
2533 void AbstractAttribute::print(raw_ostream &OS) const {
2534 OS << "[" << getManifestPosition() << "][" << getAsStr() << "]["
2535 << AnchoredVal.getName() << "]";
2536 }
2537 ///}
2538
2539 /// ----------------------------------------------------------------------------
2540 /// Pass (Manager) Boilerplate
2541 /// ----------------------------------------------------------------------------
2542
2543 static bool runAttributorOnModule(Module &M) {
2544 if (DisableAttributor)
2545 return false;
2546
2547 LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
2548 << " functions.\n");
2549
2550 // Create an Attributor and initially empty information cache that is filled
2551 // while we identify default attribute opportunities.
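// The InformationCache is shared by all abstract attributes created below; it
// caches, per function, the instructions with interesting opcodes as well as
// the instructions that may read or write memory.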
2552 Attributor A; 2553 InformationCache InfoCache; 2554 2555 for (Function &F : M) { 2556 // TODO: Not all attributes require an exact definition. Find a way to 2557 // enable deduction for some but not all attributes in case the 2558 // definition might be changed at runtime, see also 2559 // http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html. 2560 // TODO: We could always determine abstract attributes and if sufficient 2561 // information was found we could duplicate the functions that do not 2562 // have an exact definition. 2563 if (!F.hasExactDefinition()) { 2564 NumFnWithoutExactDefinition++; 2565 continue; 2566 } 2567 2568 // For now we ignore naked and optnone functions. 2569 if (F.hasFnAttribute(Attribute::Naked) || 2570 F.hasFnAttribute(Attribute::OptimizeNone)) 2571 continue; 2572 2573 NumFnWithExactDefinition++; 2574 2575 // Populate the Attributor with abstract attribute opportunities in the 2576 // function and the information cache with IR information. 2577 A.identifyDefaultAbstractAttributes(F, InfoCache); 2578 } 2579 2580 return A.run() == ChangeStatus::CHANGED; 2581 } 2582 2583 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) { 2584 if (runAttributorOnModule(M)) { 2585 // FIXME: Think about passes we will preserve and add them here. 2586 return PreservedAnalyses::none(); 2587 } 2588 return PreservedAnalyses::all(); 2589 } 2590 2591 namespace { 2592 2593 struct AttributorLegacyPass : public ModulePass { 2594 static char ID; 2595 2596 AttributorLegacyPass() : ModulePass(ID) { 2597 initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry()); 2598 } 2599 2600 bool runOnModule(Module &M) override { 2601 if (skipModule(M)) 2602 return false; 2603 return runAttributorOnModule(M); 2604 } 2605 2606 void getAnalysisUsage(AnalysisUsage &AU) const override { 2607 // FIXME: Think about passes we will preserve and add them here. 2608 AU.setPreservesCFG(); 2609 } 2610 }; 2611 2612 } // end anonymous namespace 2613 2614 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); } 2615 2616 char AttributorLegacyPass::ID = 0; 2617 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor", 2618 "Deduce and propagate attributes", false, false) 2619 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor", 2620 "Deduce and propagate attributes", false, false) 2621