//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
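//
// For example, STATS_DECLTRACK_ARG_ATTR(returned) expands (via STATS_DECL and
// STATS_TRACK) to:
//   { STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned); }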
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more evolved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<bool> VerifyAttributor(
    "attributor-verify", cl::Hidden,
    cl::desc("Verify the Attributor deduction and "
             "manifestation of attributes -- may issue false-positive errors"),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
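
// Example semantics, directly from the definitions above: combining the status
// of several steps with operator| yields CHANGED if any step changed, while
// combining with operator& yields UNCHANGED if any step was unchanged.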

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
template <typename AAType, typename StateTy>
bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA);

  // All values have been visited.
  return true;
}

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
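
// Example (illustrative): for integer attributes the values are compared via
// getValueAsInt(), so an existing dereferenceable(8) makes a deduced
// dereferenceable(4) "equal or worse" and addIfNotExistent below will not
// manifest it.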

/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already
/// present in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}
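
// Note: update() is the sole driver of state changes during the fixpoint
// iteration; once the state is at a fixpoint, updateImpl is never invoked
// again and UNCHANGED is returned immediately.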

ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following, some generic code will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}

const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
  return false;
}

void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
}

void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (isa<Argument>(AnchorVal)) {
      assert(cast<Argument>(AnchorVal)->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(cast<Argument>(AnchorVal) == &getAssociatedValue() &&
             "Associated value mismatch!");
    } else {
      assert(cast<CallBase>(*AnchorVal).arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(cast<CallBase>(*AnchorVal).getArgOperand(getArgNo()) ==
                 &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}
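
// Example (derived from SubsumingPositionIterator above): for an argument of a
// direct call site without operand bundles, the subsuming positions are the
// call site argument itself, the callee's corresponding argument, the callee
// function, and the floating position of the associated value. Attributes
// found at any of these positions apply to the original position as well.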

/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);

template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
                                                       const IntegerState &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
                                                       const BooleanState &R) {
  return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
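
// AANonNullReturned below instantiates this pattern: the returned position of
// a function is nonnull exactly when every (assumed live) returned value is
// known or assumed nonnull.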

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and we
  // want to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](CallSite CS) {
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << CSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
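
// AANonNullArgument below instantiates this pattern: an argument is nonnull if
// the corresponding operand is nonnull at every known call site.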

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const typename AAType::StateType &>(AA.getState()));
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
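
// Example (illustrative): given an Attributor A and a querying abstract
// attribute QAA, a client can ask for the deduced nounwind information of a
// function F via
//   const auto &AA = A.getAAFor<AANoUnwind>(QAA, IRPosition::function(F));
//   bool MayUnwind = !AA.isAssumedNoUnwind();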

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!F->hasExactDefinition())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0)
      return ChangeStatus::UNCHANGED;
    CB.replaceAllUsesWith(&C);
    return ChangeStatus::CHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    getIRPosition() = IRPosition::argument(*UniqueRVArg);
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U))
            Changed = ReplaceCallSiteUsersWith(*CB, *RVC) | Changed;
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVC);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}
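
// Example (illustrative): if every return statement of a function returns the
// same argument %x, getAssumedUniqueReturnValue below yields %x and manifest()
// above marks %x with the "returned" attribute; if all returns yield the
// constant 42 (possibly alongside undef), direct call site uses are replaced
// by 42.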

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << static_cast<const AbstractAttribute &>(RetValAA)
                      << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // thus if all return values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we keep a record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is volatile (memcpy,
  /// memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
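
// Example (illustrative): a seq_cst atomicrmw is non-relaxed and thus prevents
// nosync, a monotonic atomic load is relaxed and does not, and a fence in the
// single-thread scope is ignored since it does not synchronize with other
// threads.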

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both are relaxed can the instruction be treated as relaxed.
    // Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
         "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// ------------------------ NonNull Argument Attribute ------------------------
struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP) : AANonNull(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
      indicateOptimisticFixpoint();
    else
      AANonNull::initialize(A);
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP) : AANonNullImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANonNullImpl::initialize(A);

    if (isAtFixpoint())
      return;

    const IRPosition &IRP = getIRPosition();
    const Value &V = IRP.getAssociatedValue();
    const DataLayout &DL = A.getDataLayout();

    // TODO: This context sensitive query should be removed once we can do
    //       context sensitive queries in the genericValueTraversal below.
    if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
                       /* TODO: DT */ nullptr))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
                            /* TODO: CtxI */ nullptr,
                            /* TODO: DT */ nullptr))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS =
            static_cast<const AANonNull::StateType &>(AA.getState());
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
                                                     T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};

/// ------------------------ No-Recurse Attributes ----------------------------

struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
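
// Like the other call site attributes in this file, the call site variant
// below simply mirrors the callee's function-level norecurse state until call
// site specific reasoning is available.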
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code
static bool containsCycle(Function &F) {
  SmallPtrSet<BasicBlock *, 32> Visited;

  // Traverse BB by dfs and check whether successor is already visited.
  for (BasicBlock *BB : depth_first(&F)) {
    Visited.insert(BB);
    for (auto *SuccBB : successors(BB)) {
      if (Visited.count(SuccBB))
        return true;
    }
  }
  return false;
}

// Helper function that checks whether a function has a loop which might become
// an endless loop.
// FIXME: Any cycle is regarded as endless loop for now.
//        We have to allow some patterns.
static bool containsPossiblyEndlessLoop(Function *F) {
  return !F || !F->hasExactDefinition() || containsCycle(*F);
}
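// Illustrative example (not from the original source; @count is
// hypothetical): under the conservative FIXME above, any cycle blocks the
// willreturn deduction, even for loops that clearly terminate:
//
//   define void @count(i32 %n) {
//   entry:
//     br label %loop
//   loop:
//     %i = phi i32 [ 0, %entry ], [ %inc, %loop ]
//     %inc = add nsw i32 %i, 1
//     %cmp = icmp slt i32 %inc, %n
//     br i1 %cmp, label %loop, label %exit
//   exit:
//     ret void
//   }
//
// containsCycle(...) sees the back edge of %loop, so
// containsPossiblyEndlessLoop(...) returns true and the willreturn attribute
// below is fixed pessimistically already during initialization.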
struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    Function *F = getAssociatedFunction();
    if (containsPossiblyEndlessLoop(F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};

/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: It isn't sound to initialize the same way as `AANoAliasImpl`
    //       because `noalias` may not be valid in the current position.
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  AANoAliasArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    ImmutableCallSite ICS(&getAnchorValue());
    if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};

/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      /// For now, we can only deduce noalias if we have call sites.
      /// FIXME: add more support.
      ImmutableCallSite ICS(&RV);
      if (!ICS)
        return false;

      const IRPosition &RVPos = IRPosition::value(RV);
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
      return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};
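// Illustrative example (not from the original source; @wrapper is
// hypothetical): the update above lets a wrapper inherit `noalias` on its
// return value from the call it forwards, e.g.,
//
//   define i8* @wrapper(i64 %size) {
//     %mem = call noalias i8* @malloc(i64 %size)
//     ret i8* %mem
//   }
//
// The returned value is a call site that is assumed noalias and, as long as
// %mem is only "captured" by being returned, the wrapper's return value can
// be marked `noalias` as well. Returning null or undef is also fine, hence
// the early bail out for such constants.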
/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};

/// -------------------AAIsDead Function Attribute-----------------------

struct AAIsDeadImpl : public AAIsDead {
  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}

  void initialize(Attributor &A) override {
    const Function *F = getAssociatedFunction();
    if (F && !F->isDeclaration())
      exploreFromEntry(A, F);
  }

  void exploreFromEntry(Attributor &A, const Function *F) {
    ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
    assumeLive(A, F->getEntryBlock());

    for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[i]))
        NoReturnCalls.insert(NextNoReturnI);
  }

  /// Find the next assumed noreturn instruction in the block of \p I starting
  /// from, thus including, \p I.
  ///
  /// The caller is responsible for monitoring the ToBeExploredPaths set as new
  /// instructions discovered in other basic blocks will be placed in there.
  ///
  /// \returns The next assumed noreturn instruction in the block of \p I
  ///          starting from, thus including, \p I.
  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
           std::to_string(NoReturnCalls.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    for (const Instruction *NRC : NoReturnCalls) {
      Instruction *I = const_cast<Instruction *>(NRC);
      BasicBlock *BB = I->getParent();
      Instruction *SplitPos = I->getNextNode();
      // TODO: mark stuff before unreachable instructions as dead.
      if (isa_and_nonnull<UnreachableInst>(SplitPos))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we keep the invoke the split position is at the beginning of the
        // normal destination block (it invokes a noreturn function after all).
        BasicBlock *NormalDestBB = II->getNormalDest();
        SplitPos = &NormalDestBB->front();

        /// Invoke is replaced with a call and unreachable is placed after it if
        /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
        /// and only place an unreachable in the normal successor.
        if (Invoke2CallAllowed) {
          if (II->getCalledFunction()) {
            const IRPosition &IPos = IRPosition::callsite_function(*II);
            const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
            if (AANoUnw.isAssumedNoUnwind()) {
              LLVM_DEBUG(dbgs()
                         << "[AAIsDead] Replace invoke with call inst\n");
              // We do not need an invoke (II) but instead want a call followed
              // by an unreachable. However, we do not remove II as other
              // abstract attributes might have it cached as part of their
              // results. Given that we modify the CFG anyway, we simply keep II
              // around but in a new dead block. To avoid II being live through
              // a different edge we have to ensure the block we place it in is
              // only reached from the current block of II and then not reached
              // at all when we insert the unreachable.
              SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
              CallInst *CI = createCallMatchingInvoke(II);
              CI->insertBefore(II);
              CI->takeName(II);
              II->replaceAllUsesWith(CI);
              SplitPos = CI->getNextNode();
            }
          }
        }

        if (SplitPos == &NormalDestBB->front()) {
          // If this is an invoke of a noreturn function the edge to the normal
          // destination block is dead but not necessarily the block itself.
          // TODO: We need to move to an edge based system during deduction and
          //       also manifest.
          assert(!NormalDestBB->isLandingPad() &&
                 "Expected the normal destination not to be a landingpad!");
          BasicBlock *SplitBB =
              SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
          // The split block is live even if it contains only an unreachable
          // instruction at the end.
          assumeLive(A, *SplitBB);
          SplitPos = SplitBB->getTerminator();
        }
      }

      BB = SplitPos->getParent();
      SplitBlock(BB, SplitPos);
      changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
      HasChanged = ChangeStatus::CHANGED;
    }

    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }
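  // Illustrative sketch (not from the original source; @dies is hypothetical):
  // an invoke of a callee assumed `noreturn` and `nounwind`, e.g.,
  //
  //   %r = invoke i32 @dies() to label %ok unwind label %lpad
  //
  // is rewritten so that a matching `call i32 @dies()` followed by an
  // `unreachable` takes its place, while the original invoke survives in a
  // fresh ".i2c" block that is no longer reachable (other abstract attributes
  // may still hold it in their cached results). If the callee may unwind,
  // only the edge to %ok is known dead: %ok is split and the `unreachable` is
  // placed in the split block instead.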
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAssociatedFunction() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAssociatedFunction() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a noreturn call, then it is live.
    return isAfterNoReturn(I);
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Check if instruction is after noreturn call, in other words, assumed dead.
  bool isAfterNoReturn(const Instruction *I) const;

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  void assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return;

    // We assume that all of BB is (probably) live now and if there are calls to
    // internal functions we will assume that those are now live as well. This
    // is a performance optimization for blocks with calls to a lot of internal
    // functions. It can however cause dead functions to be treated as live.
    for (const Instruction &I : BB)
      if (ImmutableCallSite ICS = ImmutableCallSite(&I))
        if (const Function *F = ICS.getCalledFunction())
          if (F->hasInternalLinkage())
            A.markLiveInternalFunction(*F);
  }

  /// Collection of to be explored paths.
  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;

  /// Collection of calls with noreturn attribute, assumed or known.
  SmallSetVector<const Instruction *, 4> NoReturnCalls;
};

struct AAIsDeadFunction final : public AAIsDeadImpl {
  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(PartiallyDeadBlocks, Function,
               "Number of basic blocks classified as partially dead");
    BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
  }
};

bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
  const Instruction *PrevI = I->getPrevNode();
  while (PrevI) {
    if (NoReturnCalls.count(PrevI))
      return true;
    PrevI = PrevI->getPrevNode();
  }
  return false;
}

const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
                                                  const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  const Function &F = *BB->getParent();

  // Flag to determine if we can change an invoke to a call assuming the callee
  // is nounwind. This is not possible if the personality of the function allows
  // catching asynchronous exceptions.
  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

  // TODO: We should have a function that determines if an "edge" is dead.
  //       Edges could be from an instruction to the next or from a terminator
  //       to the successor. For now, we need to special case the unwind block
  //       of InvokeInst below.

  while (I) {
    ImmutableCallSite ICS(I);

    if (ICS) {
      const IRPosition &IPos = IRPosition::callsite_function(ICS);
      // Regardless of the no-return property of an invoke instruction we only
      // learn that the regular successor is not reachable through this
      // instruction but the unwind block might still be.
      if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
        // Use nounwind to justify the unwind block is dead as well.
        const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
        if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
          assumeLive(A, *Invoke->getUnwindDest());
          ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
        }
      }

      const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
      if (NoReturnAA.isAssumedNoReturn())
        return I;
    }

    I = I->getNextNode();
  }

  // Get new paths (reachable blocks).
  for (const BasicBlock *SuccBB : successors(BB)) {
    assumeLive(A, *SuccBB);
    ToBeExploredPaths.insert(&SuccBB->front());
  }

  // No noreturn instruction found.
  return nullptr;
}

ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
  ChangeStatus Status = ChangeStatus::UNCHANGED;

  // Temporary collection to iterate over existing noreturn instructions. This
  // will allow easier modification of the NoReturnCalls collection.
  SmallVector<const Instruction *, 8> NoReturnChanged;

  for (const Instruction *I : NoReturnCalls)
    NoReturnChanged.push_back(I);

  for (const Instruction *I : NoReturnChanged) {
    size_t Size = ToBeExploredPaths.size();

    const Instruction *NextNoReturnI = findNextNoReturn(A, I);
    if (NextNoReturnI != I) {
      Status = ChangeStatus::CHANGED;
      NoReturnCalls.remove(I);
      if (NextNoReturnI)
        NoReturnCalls.insert(NextNoReturnI);
    }

    // Explore new paths.
    while (Size != ToBeExploredPaths.size()) {
      Status = ChangeStatus::CHANGED;
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[Size++]))
        NoReturnCalls.insert(NextNoReturnI);
    }
  }

  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
                    << AssumedLiveBlocks.size() << " Total number of blocks: "
                    << getAssociatedFunction()->size() << "\n");

  // If we know everything is live there is no need to query for liveness.
  if (NoReturnCalls.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
    // Indicating a pessimistic fixpoint will cause the state to be "invalid"
    // which will cause the Attributor to not return the AAIsDead on request,
    // which will prevent us from querying isAssumedDead().
    indicatePessimisticFixpoint();
    assert(!isValidState() && "Expected an invalid state!");
    Status = ChangeStatus::CHANGED;
  }

  return Status;
}
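// Illustrative example (not from the original source; @f is hypothetical):
// liveness exploration stops at calls assumed `noreturn`. In
//
//   define i32 @f(i1 %c) {
//   entry:
//     br i1 %c, label %err, label %cont
//   err:
//     call void @abort()          ; assumed noreturn
//     br label %cont
//   cont:
//     ret i32 0
//   }
//
// the walk through %err stops at the call to @abort, so the branch after it
// is assumed dead and the err->cont edge is never taken. %cont stays live
// only because it is also reachable directly from %entry; manifest(...) then
// places an unreachable right after the @abort call.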
/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadImpl {
  AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// -------------------- Dereferenceable Argument Attribute --------------------

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
      S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 =
      clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());

    const IRPosition &IRP = this->getIRPosition();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    const Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
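// Illustrative example (not from the original source): getDeducedAttributes
// above picks the manifested spelling based on the in-flight nonnull
// information. With 8 assumed dereferenceable bytes:
//
//   position also assumed nonnull  ==> dereferenceable(8)
//   position possibly null         ==> dereferenceable_or_null(8)
//
// The byte count is never lost; only the strength of the null-ness claim
// changes between the two spellings.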
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP)
      : AADereferenceableImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        Offset = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which would simply
          // drive them down to the known value in a very slow way; giving up
          // here accelerates that.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
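// Illustrative example (not from the original source) for the offset
// accumulation above: if %base is dereferenceable(16) and we look at
//
//   %p = getelementptr inbounds i8, i8* %base, i64 4
//
// then stripping the inbounds GEP yields Base = %base with Offset = 4, so %p
// itself is dereferenceable for max(0, 16 - 4) = 12 bytes. A negative offset
// is clamped to 0 rather than used to increase the byte count.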
/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                   DerefState> {
  AADereferenceableReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                     DerefState>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl,
                                      DerefState> {
  AADereferenceableArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AADereferenceable,
                                        AADereferenceableImpl, DerefState>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP)
      : AADereferenceableFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final : AADereferenceableImpl {
  AADereferenceableCallSiteReturned(const IRPosition &IRP)
      : AADereferenceableImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AADereferenceableImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const DerefState &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};

// ------------------------ Align Argument Attribute ------------------------

struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}

  // Max alignment value allowed in IR
  static const unsigned MAX_ALIGN = 1U << 29;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    takeAssumedMinimum(MAX_ALIGN);

    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    if (getIRPosition().isFnInterfaceKind() &&
        (!getAssociatedFunction() ||
         !getAssociatedFunction()->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AnchorVal = getIRPosition().getAnchorValue();
    for (const Use &U : AnchorVal.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AnchorVal)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(getAssumedAlign());
            Changed = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AnchorVal)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(getAssumedAlign());
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            Changed = ChangeStatus::CHANGED;
          }
      }
    }

    return AAAlign::manifest(A) | Changed;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check in
  //       the existing manifest method and a new one for AAAlignImpl that value
  //       to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};
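// Illustrative example (not from the original source): besides emitting the
// `align` attribute, AAAlignImpl::manifest above upgrades the alignment of
// loads and stores through the anchor value, e.g.,
//
//   store i32 0, i32* %p, align 1   ; %p assumed align 8
//     ==> store i32 0, i32* %p, align 8
//
// The attribute itself is only emitted for alignments greater than 1, since
// align 1 carries no information.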
/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        T.takeKnownMaximum(V.getPointerAlignment(DL));
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
                                                   VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};

/// Align attribute for function return value.
struct AAAlignReturned final
    : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
  AAAlignReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};

/// Align attribute for function argument.
struct AAAlignArgument final
    : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
  AAAlignArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
};

struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAAlignImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};

/// Align attribute deduction for a call site return value.
struct AAAlignCallSiteReturned final : AAAlignImpl {
  AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAAlignImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};

/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret}))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};
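// Illustrative example (not from the original source; @spin is hypothetical):
// the predicate in AANoReturnImpl::updateImpl is invoked for every live `ret`
// and always returns false, so a function is deduced `noreturn` exactly if no
// live return instruction exists. In
//
//   define void @spin() {
//     br label %loop
//   loop:
//     br label %loop
//   }
//
// there is no ret instruction at all, so @spin keeps the optimistic
// `noreturn` assumption and reaches a fixpoint with the attribute set.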

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoCapture::initialize(A);

    const IRPosition &IRP = getIRPosition();
    const Function *F =
        getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(*F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isAssumedNoCapture())
      Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
    else if (ManifestInternal)
      Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const Function &F,
                                                   IntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care about
    // ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw exceptions
    // and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
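// Illustrative example (not from the original source; @observe is
// hypothetical): for a readonly, nothrow function that returns void, e.g.,
//
//   define void @observe(i8* %p) readonly nounwind {
//     ...
//     ret void
//   }
//
// determineFunctionCaptureCapabilities(...) can mark NO_CAPTURE as known for
// %p right away: the callee cannot stash the pointer in memory, cannot throw
// it, and has no return value to communicate it through. Note that a non-void
// return type alone already keeps NOT_CAPTURED_IN_RET unknown.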
/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, IntegerState &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA =
        A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // conservatively do not assume it.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness.
    return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  IntegerState &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};

ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));

  AANoCapture::StateType T;
  // TODO: Once we have memory behavior attributes we should use them here
  //       similar to the reasoning in
  //       AANoCaptureImpl::determineFunctionCaptureCapabilities(...).

  // TODO: Use the AAReturnedValues to learn if the argument can return or
  //       not.

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
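// Illustrative example (not from the original source; @id and @user are
// hypothetical): for
//
//   define internal i8* @id(i8* %p) {
//     ret i8* %p
//   }
//
//   define void @user(i8* %q) {
//     %r = call i8* @id(i8* %q)
//     ret void
//   }
//
// %p is neither stored nor converted to an integer, but it is returned, so it
// is only "no-capture-maybe-returned". At the call site in @user the returned
// copy %r is registered as a potential copy and tracked as well; since %r has
// no further uses, %q can still be deduced `nocapture`.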

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};

/// ------------------ Value Simplify Attribute ----------------------------
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
                        : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!getAssumed())
      return const_cast<Value *>(&getAssociatedValue());
    return SimplifiedAssociatedValue;
  }
  void initialize(Attributor &A) override {}

  /// Helper function for querying AAValueSimplify and updating candidate.
  /// \param QueryingValue Value trying to unify with SimplifiedValue
  /// \param AccumulatedSimplifiedValue Current simplification result.
  static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                             Value &QueryingValue,
                             Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add typecast support.

    auto &ValueSimpifyAA = A.getAAFor<AAValueSimplify>(
        QueryingAA, IRPosition::value(QueryingValue));

    Optional<Value *> QueryingValueSimplified =
        ValueSimpifyAA.getAssumedSimplifiedValue(A);

    if (!QueryingValueSimplified.hasValue())
      return true;

    if (!QueryingValueSimplified.getValue())
      return false;

    Value &QueryingValueSimplifiedUnwrapped =
        *QueryingValueSimplified.getValue();

    if (isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
      return true;

    if (AccumulatedSimplifiedValue.hasValue())
      return AccumulatedSimplifiedValue == QueryingValueSimplified;

    LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << QueryingValue
                      << " is assumed to be "
                      << QueryingValueSimplifiedUnwrapped << "\n");

    AccumulatedSimplifiedValue = QueryingValueSimplified;
    return true;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (!SimplifiedAssociatedValue.hasValue() ||
        !SimplifiedAssociatedValue.getValue())
      return Changed;

    if (auto *C = dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())) {
      // We can replace the AssociatedValue with the constant.
      Value &V = getAssociatedValue();
      if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
        LLVM_DEBUG(dbgs() << "[Attributor][ValueSimplify] " << V << " -> " << *C
                          << "\n");
        V.replaceAllUsesWith(C);
        Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};
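// Illustrative example (not from the original source; @callee is
// hypothetical): if an internal function is always called with the same
// constant,
//
//   define internal i32 @callee(i32 %x) {
//     %y = add i32 %x, 1
//     ret i32 %y
//   }
//   ...
//   call i32 @callee(i32 7)   ; every call site passes 7
//   call i32 @callee(i32 7)
//
// the argument position of %x unifies to the constant 7 across all call
// sites, and manifest(...) replaces all uses of %x inside @callee with 7.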

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto PredForCallSite = [&](CallSite CS) {
      return checkAndUpdate(A, *this, *CS.getArgOperand(getArgNo()),
                            SimplifiedAssociatedValue);
    };

    if (!A.checkForAllCallSites(PredForCallSite, *this, true))
      return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
};

struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto PredForReturned = [&](Value &V) {
      return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
    };

    if (!A.checkForAllReturnedValues(PredForReturned, *this))
      return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFloating : AAValueSimplifyImpl {
  AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAnchorValue();

    // TODO: add other cases
    if (isa<Constant>(V) || isa<UndefValue>(V))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto VisitValueCB = [&](Value &V, BooleanState, bool Stripped) -> bool {
      auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
        LLVM_DEBUG(
            dbgs() << "[Attributor][ValueSimplify] Can't be stripped more : "
                   << V << "\n");
        indicatePessimisticFixpoint();
        return false;
      }
      return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
    };

    if (!genericValueTraversal<AAValueSimplify, BooleanState>(
            A, getIRPosition(), *this, static_cast<BooleanState &>(*this),
            VisitValueCB))
      return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SimplifiedAssociatedValue = &getAnchorValue();
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP)
      : AAValueSimplifyFunction(IRP) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
      : AAValueSimplifyReturned(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
      : AAValueSimplifyFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};

/// ----------------------------------------------------------------------------
/// Attributor
/// ----------------------------------------------------------------------------

bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *LivenessAA) {
  const Instruction *CtxI = AA.getIRPosition().getCtxI();
  if (!CtxI)
    return false;

  if (!LivenessAA)
    LivenessAA =
        &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
                            /* TrackDependence */ false);

  // Don't check liveness for AAIsDead.
  if (&AA == LivenessAA)
    return false;

  if (!LivenessAA->isAssumedDead(CtxI))
    return false;

  // We actually used liveness information so we have to record a dependence.
  recordDependence(*LivenessAA, AA);

  return true;
}
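
// A minimal usage sketch for checkForAllCallSites, mirroring the pattern used
// by AAValueSimplifyArgument::updateImpl above (names are illustrative):
//
//   auto PredForCallSite = [&](CallSite CS) {
//     return checkAndUpdate(A, *this, *CS.getArgOperand(getArgNo()),
//                           SimplifiedAssociatedValue);
//   };
//   if (!A.checkForAllCallSites(PredForCallSite, *this,
//                               /* RequireAllCallSites */ true))
//     return indicatePessimisticFixpoint();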
bool Attributor::checkForAllCallSites(const function_ref<bool(CallSite)> &Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has to have
  // internal linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  if (RequireAllCallSites && !AssociatedFunction->hasInternalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << AssociatedFunction->getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  for (const Use &U : AssociatedFunction->uses()) {
    Instruction *I = dyn_cast<Instruction>(U.getUser());
    // TODO: Deal with abstract call sites here.
    if (!I)
      return false;

    Function *Caller = I->getFunction();

    const auto &LivenessAA = getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*Caller), /* TrackDependence */ false);

    // Skip dead calls.
    if (LivenessAA.isAssumedDead(I)) {
      // We actually used liveness information so we have to record a
      // dependence.
      recordDependence(LivenessAA, QueryingAA);
      continue;
    }

    CallSite CS(U.getUser());
    if (!CS || !CS.isCallee(&U)) {
      if (!RequireAllCallSites)
        continue;

      LLVM_DEBUG(dbgs() << "[Attributor] User " << *U.getUser()
                        << " is an invalid use of "
                        << AssociatedFunction->getName() << "\n");
      return false;
    }

    if (Pred(CS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *CS.getInstruction() << "\n");
    return false;
  }

  return true;
}

bool Attributor::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide return instructions we have to have an exact
  // definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // If this is a call site query we use the call site specific return values
  // and liveness information.
  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
}
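
// A usage sketch for the interface above (illustrative; only the predicate
// shape is fixed by the interface):
//
//   auto Pred = [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &RIs) {
//     // Inspect the potentially returned value RV and the return
//     // instructions RIs through which it may be returned.
//     return true; // Return false to abort the check.
//   };
//   if (!A.checkForAllReturnedValuesAndReturnInsts(Pred, *this))
//     return indicatePessimisticFixpoint();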
bool Attributor::checkForAllReturnedValues(
    const function_ref<bool(Value &)> &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
      [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
        return Pred(RV);
      });
}

bool Attributor::checkForAllInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide instructions we have to have an exact definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  auto &OpcodeInstMap =
      InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
  for (unsigned Opcode : Opcodes) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      // Skip dead instructions.
      if (LivenessAA.isAssumedDead(I)) {
        AnyDead = true;
        continue;
      }

      if (!Pred(*I))
        return false;
    }
  }

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}

bool Attributor::checkForAllReadWriteInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    AbstractAttribute &QueryingAA) {

  const Function *AssociatedFunction =
      QueryingAA.getIRPosition().getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  for (Instruction *I :
       InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
    // Skip dead instructions.
    if (LivenessAA.isAssumedDead(I)) {
      AnyDead = true;
      continue;
    }

    if (!Pred(*I))
      return false;
  }

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}

ChangeStatus Attributor::run(Module &M) {
  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << AllAbstractAttributes.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;

  SmallVector<AbstractAttribute *, 64> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist;
  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());

  bool RecomputeDependences = false;

  do {
    // Remember the size to determine new attributes.
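    // (New abstract attributes created during an update are appended to
    // AllAbstractAttributes; remembering the old size lets us add exactly the
    // new ones to the changed set at the end of this iteration.)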
    size_t NumAAs = AllAbstractAttributes.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // If dependences (=QueryMap) are recomputed we have to look at all
    // abstract attributes again, regardless of what changed in the last
    // iteration.
    if (RecomputeDependences) {
      LLVM_DEBUG(
          dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
      QueryMap.clear();
      ChangedAAs.clear();
      Worklist.insert(AllAbstractAttributes.begin(),
                      AllAbstractAttributes.end());
    }

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      auto &QuerriedAAs = QueryMap[ChangedAA];
      Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
    }

    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");

    // Reset the changed set.
    ChangedAAs.clear();

    // Update all abstract attributes in the work list and record the ones
    // that changed.
    for (AbstractAttribute *AA : Worklist)
      if (!isAssumedDead(*AA, nullptr))
        if (AA->update(*this) == ChangeStatus::CHANGED)
          ChangedAAs.push_back(AA);

    // Check whether we should recompute the dependences in the next iteration.
    RecomputeDependences = (DepRecomputeInterval > 0 &&
                            IterationCounter % DepRecomputeInterval == 0);

    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
                      AllAbstractAttributes.end());

    // Reset the work list and repopulate with the changed abstract attributes.
    // Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());

  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
                                 VerifyMaxFixpointIterations));

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxFixpointIterations
                    << " iterations\n");

  size_t NumFinalAAs = AllAbstractAttributes.size();

  bool FinishedAtFixpoint = Worklist.empty();

  // Reset abstract attributes not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
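  // The loop below computes a transitive closure: while pessimizing a changed
  // attribute we append everything that queried it (recorded in QueryMap) to
  // ChangedAAs, so transitively dependent attributes get pessimized as well.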
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;

    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();

      NumAttributesTimedOut++;
    }

    auto &QuerriedAAs = QueryMap[ChangedAA];
    ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
  }

  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });

  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (AbstractAttribute *AA : AllAbstractAttributes) {
    AbstractState &State = AA->getState();

    // If a fixpoint was not reached already, we can now take the optimistic
    // state. This is correct because we enforced a pessimistic one on abstract
    // attributes that were transitively dependent on a changed one already
    // above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();

    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;

    // Skip dead code.
    if (isAssumedDead(*AA, nullptr))
      continue;
    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
      AA->trackStatistics();

    ManifestChange = ManifestChange | LocalChange;

    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }

  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " abstract attributes while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");

  // If verification is requested, this run finished at a fixpoint, and the IR
  // was changed, we re-run the whole fixpoint analysis, starting at the
  // re-initialization of the arguments. This re-run should not result in an
  // IR change. Though, the (virtual) state of attributes at the end of the
  // re-run might be more optimistic than the known state or the IR state if
  // the better state cannot be manifested.
  if (VerifyAttributor && FinishedAtFixpoint &&
      ManifestChange == ChangeStatus::CHANGED) {
    VerifyAttributor = false;
    ChangeStatus VerifyStatus = run(M);
    if (VerifyStatus != ChangeStatus::UNCHANGED)
      llvm_unreachable(
          "Attributor verification failed, re-run did result in an IR change "
          "even after a fixpoint was reached in the original run. (False "
          "positives possible!)");
    VerifyAttributor = true;
  }

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;

  (void)NumFinalAAs;
  assert(
      NumFinalAAs == AllAbstractAttributes.size() &&
      "Expected the final number of abstract attributes to remain unchanged!");

  // Delete IR entities at the end to avoid invalid references and to get a
  // nice deletion order.
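  // Note: Instructions are erased first, then basic blocks, then functions;
  // where necessary, remaining uses are replaced with undef beforehand, so
  // nothing references an already deleted entity during the teardown.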
  {
    LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
                      << ToBeDeletedFunctions.size() << " functions and "
                      << ToBeDeletedBlocks.size() << " blocks and "
                      << ToBeDeletedInsts.size() << " instructions\n");
    for (Instruction *I : ToBeDeletedInsts) {
      if (!I->use_empty())
        I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }

    if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
      SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
      ToBeDeletedBBs.reserve(NumDeadBlocks);
      ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
      DeleteDeadBlocks(ToBeDeletedBBs);
      STATS_DECLTRACK(AAIsDead, BasicBlock,
                      "Number of dead basic blocks deleted.");
    }

    STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
    for (Function *Fn : ToBeDeletedFunctions) {
      Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
      Fn->eraseFromParent();
      STATS_TRACK(AAIsDead, Function);
    }

    // Identify dead internal functions and delete them. This happens outside
    // the other fixpoint analysis as we might treat potentially dead functions
    // as live to lower the number of iterations. If they happen to be dead,
    // the fixpoint loop below will identify and eliminate them.
    SmallVector<Function *, 8> InternalFns;
    for (Function &F : M)
      if (F.hasInternalLinkage())
        InternalFns.push_back(&F);

    bool FoundDeadFn = true;
    while (FoundDeadFn) {
      FoundDeadFn = false;
      for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
        Function *F = InternalFns[u];
        if (!F)
          continue;

        // The predicate rejects every call site, so the check below succeeds
        // only if there is no live call site at all, i.e., the function is
        // dead.
        const auto *LivenessAA =
            lookupAAFor<AAIsDead>(IRPosition::function(*F));
        if (LivenessAA &&
            !checkForAllCallSites([](CallSite CS) { return false; },
                                  *LivenessAA, true))
          continue;

        STATS_TRACK(AAIsDead, Function);
        F->replaceAllUsesWith(UndefValue::get(F->getType()));
        F->eraseFromParent();
        InternalFns[u] = nullptr;
        FoundDeadFn = true;
      }
    }
  }

  if (VerifyMaxFixpointIterations &&
      IterationCounter != MaxFixpointIterations) {
    errs() << "\n[Attributor] Fixpoint iteration done after: "
           << IterationCounter << "/" << MaxFixpointIterations
           << " iterations\n";
    llvm_unreachable("The fixpoint was not reached with exactly the number of "
                     "specified iterations!");
  }

  return ManifestChange;
}

void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function can be "nounwind".
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync".
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);
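
  // Note: The seeding in this function follows one pattern throughout: for
  // every IR position that could carry an attribute we optimistically create
  // the corresponding abstract attribute; the fixpoint iteration later
  // determines what actually holds.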
  // Return attributes are only appropriate if the return type is not void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    IRPosition RetPos = IRPosition::returned(F);

    // Every returned value might be simplified.
    getOrCreateAAFor<AAValueSimplify>(RetPos);

    if (ReturnType->isPointerTy()) {

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }

  for (Argument &Arg : F.args()) {
    IRPosition ArgPos = IRPosition::argument(Arg);

    // Every argument might be simplified.
    getOrCreateAAFor<AAValueSimplify>(ArgPos);

    if (Arg.getType()->isPointerTy()) {
      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);
    }
  }

  // Walk all instructions to find more attribute opportunities and also
  // interesting instructions that might be queried by abstract attributes
  // during their initialization or update.
  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones that are as
    // identified in the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
             "New call site/base instruction type needs to be known in the "
             "attributor.");
      break;
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
      break;
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
      break;
    case Instruction::Call:
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::Resume:
    case Instruction::Ret:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode)
      InstOpcodeMap[I.getOpcode()].push_back(&I);
    if (I.mayReadOrWriteMemory())
      ReadOrWriteInsts.push_back(&I);

    CallSite CS(&I);
    if (CS && CS.getCalledFunction()) {
      for (unsigned i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {

        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Call site argument might be simplified.
        getOrCreateAAFor<AAValueSimplify>(CSArgPos);

        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;

        // Call site argument attribute "non-null".
        getOrCreateAAFor<AANonNull>(CSArgPos);

        // Call site argument attribute "no-alias".
        getOrCreateAAFor<AANoAlias>(CSArgPos);

        // Call site argument attribute "dereferenceable".
        getOrCreateAAFor<AADereferenceable>(CSArgPos);

        // Call site argument attribute "align".
        getOrCreateAAFor<AAAlign>(CSArgPos);
      }
    }
  }
}

/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
            << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerState &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

void AbstractAttribute::print(raw_ostream &OS) const {
  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
     << "]";
}
///}

/// ----------------------------------------------------------------------------
/// Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnModule(Module &M) {
  if (DisableAttributor)
    return false;

  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
                    << " functions.\n");

  // Create an Attributor and an initially empty information cache that is
  // filled while we identify default attribute opportunities.
  InformationCache InfoCache(M.getDataLayout());
  Attributor A(InfoCache, DepRecInterval);

  for (Function &F : M) {
    if (F.hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // For now we ignore naked and optnone functions.
    if (F.hasFnAttribute(Attribute::Naked) ||
        F.hasFnAttribute(Attribute::OptimizeNone))
      continue;

    // We look at internal functions only on-demand but if any use is not a
    // direct call, we have to do it eagerly.
    if (F.hasInternalLinkage()) {
      if (llvm::all_of(F.uses(), [](const Use &U) {
            return ImmutableCallSite(U.getUser()) &&
                   ImmutableCallSite(U.getUser()).isCallee(&U);
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(F);
  }

  return A.run(M) == ChangeStatus::CHANGED;
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  if (runAttributorOnModule(M)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}

namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;
    return runAttributorOnModule(M);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
  }
};

} // end anonymous namespace

Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }

char AttributorLegacyPass::ID = 0;

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AANoAlias::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
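
// As an illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(
// AANoUnwind) expands to (roughly, with the invalid cases compressed into a
// default):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new AANoUnwindFunction(IRP);
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new AANoUnwindCallSite(IRP);
//       break;
//     default:
//       llvm_unreachable("Cannot create AANoUnwind for this position!");
//     }
//     return *AA;
//   }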

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)

#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV

INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)