//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
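// Illustrative only (not part of the original source): with multiple
// increment sites one could, using the macros defined below, declare the
// statistic once and bump it from several places, e.g.:
//
//   STATS_DECL(nounwind, Function, BUILD_STAT_MSG_IR_ATTR(functions, nounwind))
//   ...
//   STATS_TRACK(nounwind, Function)
//
// The attribute and statistic names above are placeholders for whatever the
// caller actually tracks.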
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}
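// Illustrative only (not part of the original source): callers of the helper
// above typically distinguish the "no answer (yet)" case (None) from the
// "simplified, but not to a ConstantInt" case (nullptr), e.g.:
//
//   bool UsedAssumedInformation = false;
//   if (Optional<ConstantInt *> CI =
//           getAssumedConstantInt(A, V, *this, UsedAssumedInformation))
//     if (*CI)
//       ; // V is (assumed to be) the constant integer *CI.
//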
/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

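// Illustrative walkthrough (not part of the original source): assuming a
// struct type %S = { i32, i32, i64 } with natural layout and a value %p of
// type %S*, a call constructPointer(i64*, %p, /* Offset */ 8, ...) first
// steps through the pointer type (Idx 0, Rem 8), then into the struct element
// containing byte 8 (Idx 2, Rem 0), and emits roughly:
//
//   %p.0.2 = getelementptr %S, %S* %p, i32 0, i32 2
//
// followed by a bit-or-pointer cast to the requested result type. Offsets
// that do not hit a natural element boundary fall back to an i8* GEP over the
// remaining bytes.
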
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         /* TrackDependence */ UseAssumed);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

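// Illustrative only (not part of the original source): for a store through
// "%q = getelementptr inbounds i32, i32* %p, i64 3", both helpers above
// report %p as the base; getBasePointerOfAccessPointerOperand yields the
// constant byte offset 12, while getMinimalBaseOfAccsesPointerOperand can in
// addition consult AAValueConstantRange to turn non-constant indices into a
// (known or assumed) minimal byte offset.
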
/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as-in update is
/// required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

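// Illustrative only (not part of the original source): these helper templates
// are meant to be mixed into concrete attribute classes. A hypothetical
// argument-position attribute AAFooArgument could, for example, reuse the
// call-site clamping logic simply by deriving from the wrapper:
//
//   struct AAFooArgument final
//       : AAArgumentFromCallSiteArguments<AAFoo, AAFooImpl> {
//     AAFooArgument(const IRPosition &IRP, Attributor &A)
//         : AAArgumentFromCallSiteArguments<AAFoo, AAFooImpl>(IRP, A) {}
//     void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(foo) }
//   };
//
// where AAFoo/AAFooImpl are placeholders for an abstract attribute and its
// shared implementation class.
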
/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  //   void f(int a, int b, int *ptr) {
  //     if (a)
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     else {
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     }
  //   }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of function with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of function with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV, SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV
                      << " by #" << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends; if we do not know anything about the returned call we
    // mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible,
    // that is, if all return values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is nosync (currently only
  /// the memcpy, memmove, and memset family).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed; otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) };
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
1567 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 1568 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 1569 : AANoFreeFloating(IRP, A) {} 1570 1571 ChangeStatus manifest(Attributor &A) override { 1572 return ChangeStatus::UNCHANGED; 1573 } 1574 /// See AbstractAttribute::trackStatistics() 1575 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 1576 }; 1577 1578 /// ------------------------ NonNull Argument Attribute ------------------------ 1579 static int64_t getKnownNonNullAndDerefBytesForUse( 1580 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 1581 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 1582 TrackUse = false; 1583 1584 const Value *UseV = U->get(); 1585 if (!UseV->getType()->isPointerTy()) 1586 return 0; 1587 1588 Type *PtrTy = UseV->getType(); 1589 const Function *F = I->getFunction(); 1590 bool NullPointerIsDefined = 1591 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 1592 const DataLayout &DL = A.getInfoCache().getDL(); 1593 if (const auto *CB = dyn_cast<CallBase>(I)) { 1594 if (CB->isBundleOperand(U)) { 1595 if (RetainedKnowledge RK = getKnowledgeFromUse( 1596 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 1597 IsNonNull |= 1598 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 1599 return RK.ArgValue; 1600 } 1601 return 0; 1602 } 1603 1604 if (CB->isCallee(U)) { 1605 IsNonNull |= !NullPointerIsDefined; 1606 return 0; 1607 } 1608 1609 unsigned ArgNo = CB->getArgOperandNo(U); 1610 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 1611 // As long as we only use known information there is no need to track 1612 // dependences here. 1613 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, 1614 /* TrackDependence */ false); 1615 IsNonNull |= DerefAA.isKnownNonNull(); 1616 return DerefAA.getKnownDereferenceableBytes(); 1617 } 1618 1619 // We need to follow common pointer manipulation uses to the accesses they 1620 // feed into. We can try to be smart to avoid looking through things we do not 1621 // like for now, e.g., non-inbounds GEPs. 1622 if (isa<CastInst>(I)) { 1623 TrackUse = true; 1624 return 0; 1625 } 1626 1627 if (isa<GetElementPtrInst>(I)) { 1628 TrackUse = true; 1629 return 0; 1630 } 1631 1632 int64_t Offset; 1633 const Value *Base = 1634 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); 1635 if (Base) { 1636 if (Base == &AssociatedValue && 1637 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1638 int64_t DerefBytes = 1639 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; 1640 1641 IsNonNull |= !NullPointerIsDefined; 1642 return std::max(int64_t(0), DerefBytes); 1643 } 1644 } 1645 1646 /// Corner case when an offset is 0. 
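/// Here non-inbounds GEPs are allowed as well, but only an offset of exactly
/// 0 is accepted. E.g. (illustrative IR), an access through
///   %q = getelementptr i32, i32* %p, i64 0
/// still lets us report the store size of i32 as dereferenceable bytes of %p.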
1647 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, 1648 /*AllowNonInbounds*/ true); 1649 if (Base) { 1650 if (Offset == 0 && Base == &AssociatedValue && 1651 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1652 int64_t DerefBytes = 1653 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); 1654 IsNonNull |= !NullPointerIsDefined; 1655 return std::max(int64_t(0), DerefBytes); 1656 } 1657 } 1658 1659 return 0; 1660 } 1661 1662 struct AANonNullImpl : AANonNull { 1663 AANonNullImpl(const IRPosition &IRP, Attributor &A) 1664 : AANonNull(IRP, A), 1665 NullIsDefined(NullPointerIsDefined( 1666 getAnchorScope(), 1667 getAssociatedValue().getType()->getPointerAddressSpace())) {} 1668 1669 /// See AbstractAttribute::initialize(...). 1670 void initialize(Attributor &A) override { 1671 Value &V = getAssociatedValue(); 1672 if (!NullIsDefined && 1673 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 1674 /* IgnoreSubsumingPositions */ false, &A)) 1675 indicateOptimisticFixpoint(); 1676 else if (isa<ConstantPointerNull>(V)) 1677 indicatePessimisticFixpoint(); 1678 else 1679 AANonNull::initialize(A); 1680 1681 bool CanBeNull = true; 1682 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) 1683 if (!CanBeNull) 1684 indicateOptimisticFixpoint(); 1685 1686 if (!getState().isAtFixpoint()) 1687 if (Instruction *CtxI = getCtxI()) 1688 followUsesInMBEC(*this, A, getState(), *CtxI); 1689 } 1690 1691 /// See followUsesInMBEC 1692 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 1693 AANonNull::StateType &State) { 1694 bool IsNonNull = false; 1695 bool TrackUse = false; 1696 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 1697 IsNonNull, TrackUse); 1698 State.setKnown(IsNonNull); 1699 return TrackUse; 1700 } 1701 1702 /// See AbstractAttribute::getAsStr(). 1703 const std::string getAsStr() const override { 1704 return getAssumed() ? "nonnull" : "may-null"; 1705 } 1706 1707 /// Flag to determine if the underlying value can be null and still allow 1708 /// valid accesses. 1709 const bool NullIsDefined; 1710 }; 1711 1712 /// NonNull attribute for a floating value. 1713 struct AANonNullFloating : public AANonNullImpl { 1714 AANonNullFloating(const IRPosition &IRP, Attributor &A) 1715 : AANonNullImpl(IRP, A) {} 1716 1717 /// See AbstractAttribute::updateImpl(...). 1718 ChangeStatus updateImpl(Attributor &A) override { 1719 if (!NullIsDefined) { 1720 const auto &DerefAA = 1721 A.getAAFor<AADereferenceable>(*this, getIRPosition()); 1722 if (DerefAA.getAssumedDereferenceableBytes()) 1723 return ChangeStatus::UNCHANGED; 1724 } 1725 1726 const DataLayout &DL = A.getDataLayout(); 1727 1728 DominatorTree *DT = nullptr; 1729 AssumptionCache *AC = nullptr; 1730 InformationCache &InfoCache = A.getInfoCache(); 1731 if (const Function *Fn = getAnchorScope()) { 1732 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 1733 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 1734 } 1735 1736 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 1737 AANonNull::StateType &T, bool Stripped) -> bool { 1738 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); 1739 if (!Stripped && this == &AA) { 1740 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 1741 T.indicatePessimisticFixpoint(); 1742 } else { 1743 // Use abstract attribute information. 
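// The state of the (possibly stripped) value's own AANonNull is joined into
// T below; roughly, T remains in a valid "nonnull" state only while every
// value reached by the traversal is itself assumed nonnull.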
1744 const AANonNull::StateType &NS = 1745 static_cast<const AANonNull::StateType &>(AA.getState()); 1746 T ^= NS; 1747 } 1748 return T.isValidState(); 1749 }; 1750 1751 StateType T; 1752 if (!genericValueTraversal<AANonNull, StateType>( 1753 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 1754 return indicatePessimisticFixpoint(); 1755 1756 return clampStateAndIndicateChange(getState(), T); 1757 } 1758 1759 /// See AbstractAttribute::trackStatistics() 1760 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1761 }; 1762 1763 /// NonNull attribute for function return value. 1764 struct AANonNullReturned final 1765 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> { 1766 AANonNullReturned(const IRPosition &IRP, Attributor &A) 1767 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {} 1768 1769 /// See AbstractAttribute::trackStatistics() 1770 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1771 }; 1772 1773 /// NonNull attribute for function argument. 1774 struct AANonNullArgument final 1775 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 1776 AANonNullArgument(const IRPosition &IRP, Attributor &A) 1777 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 1778 1779 /// See AbstractAttribute::trackStatistics() 1780 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 1781 }; 1782 1783 struct AANonNullCallSiteArgument final : AANonNullFloating { 1784 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 1785 : AANonNullFloating(IRP, A) {} 1786 1787 /// See AbstractAttribute::trackStatistics() 1788 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 1789 }; 1790 1791 /// NonNull attribute for a call site return position. 1792 struct AANonNullCallSiteReturned final 1793 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 1794 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 1795 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 1796 1797 /// See AbstractAttribute::trackStatistics() 1798 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 1799 }; 1800 1801 /// ------------------------ No-Recurse Attributes ---------------------------- 1802 1803 struct AANoRecurseImpl : public AANoRecurse { 1804 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 1805 1806 /// See AbstractAttribute::getAsStr() 1807 const std::string getAsStr() const override { 1808 return getAssumed() ? "norecurse" : "may-recurse"; 1809 } 1810 }; 1811 1812 struct AANoRecurseFunction final : AANoRecurseImpl { 1813 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 1814 : AANoRecurseImpl(IRP, A) {} 1815 1816 /// See AbstractAttribute::initialize(...). 1817 void initialize(Attributor &A) override { 1818 AANoRecurseImpl::initialize(A); 1819 if (const Function *F = getAnchorScope()) 1820 if (A.getInfoCache().getSccSize(*F) != 1) 1821 indicatePessimisticFixpoint(); 1822 } 1823 1824 /// See AbstractAttribute::updateImpl(...). 1825 ChangeStatus updateImpl(Attributor &A) override { 1826 1827 // If all live call sites are known to be no-recurse, we are as well. 
1828 auto CallSitePred = [&](AbstractCallSite ACS) { 1829 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 1830 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 1831 /* TrackDependence */ false, DepClassTy::OPTIONAL); 1832 return NoRecurseAA.isKnownNoRecurse(); 1833 }; 1834 bool AllCallSitesKnown; 1835 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { 1836 // If we know all call sites and all are known no-recurse, we are done. 1837 // If all known call sites, which might not be all that exist, are known 1838 // to be no-recurse, we are not done but we can continue to assume 1839 // no-recurse. If one of the call sites we have not visited will become 1840 // live, another update is triggered. 1841 if (AllCallSitesKnown) 1842 indicateOptimisticFixpoint(); 1843 return ChangeStatus::UNCHANGED; 1844 } 1845 1846 // If the above check does not hold anymore we look at the calls. 1847 auto CheckForNoRecurse = [&](Instruction &I) { 1848 const auto &CB = cast<CallBase>(I); 1849 if (CB.hasFnAttr(Attribute::NoRecurse)) 1850 return true; 1851 1852 const auto &NoRecurseAA = 1853 A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB)); 1854 if (!NoRecurseAA.isAssumedNoRecurse()) 1855 return false; 1856 1857 // Recursion to the same function 1858 if (CB.getCalledFunction() == getAnchorScope()) 1859 return false; 1860 1861 return true; 1862 }; 1863 1864 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this)) 1865 return indicatePessimisticFixpoint(); 1866 return ChangeStatus::UNCHANGED; 1867 } 1868 1869 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 1870 }; 1871 1872 /// NoRecurse attribute deduction for a call sites. 1873 struct AANoRecurseCallSite final : AANoRecurseImpl { 1874 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 1875 : AANoRecurseImpl(IRP, A) {} 1876 1877 /// See AbstractAttribute::initialize(...). 1878 void initialize(Attributor &A) override { 1879 AANoRecurseImpl::initialize(A); 1880 Function *F = getAssociatedFunction(); 1881 if (!F) 1882 indicatePessimisticFixpoint(); 1883 } 1884 1885 /// See AbstractAttribute::updateImpl(...). 1886 ChangeStatus updateImpl(Attributor &A) override { 1887 // TODO: Once we have call site specific value information we can provide 1888 // call site specific liveness information and then it makes 1889 // sense to specialize attributes for call sites arguments instead of 1890 // redirecting requests to the callee argument. 1891 Function *F = getAssociatedFunction(); 1892 const IRPosition &FnPos = IRPosition::function(*F); 1893 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos); 1894 return clampStateAndIndicateChange( 1895 getState(), 1896 static_cast<const AANoRecurse::StateType &>(FnAA.getState())); 1897 } 1898 1899 /// See AbstractAttribute::trackStatistics() 1900 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); } 1901 }; 1902 1903 /// -------------------- Undefined-Behavior Attributes ------------------------ 1904 1905 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { 1906 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A) 1907 : AAUndefinedBehavior(IRP, A) {} 1908 1909 /// See AbstractAttribute::updateImpl(...). 1910 // through a pointer (i.e. also branches etc.) 
1911 ChangeStatus updateImpl(Attributor &A) override { 1912 const size_t UBPrevSize = KnownUBInsts.size(); 1913 const size_t NoUBPrevSize = AssumedNoUBInsts.size(); 1914 1915 auto InspectMemAccessInstForUB = [&](Instruction &I) { 1916 // Skip instructions that are already saved. 1917 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1918 return true; 1919 1920 // If we reach here, we know we have an instruction 1921 // that accesses memory through a pointer operand, 1922 // for which getPointerOperand() should give it to us. 1923 const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); 1924 assert(PtrOp && 1925 "Expected pointer operand of memory accessing instruction"); 1926 1927 // Either we stopped and the appropriate action was taken, 1928 // or we got back a simplified value to continue. 1929 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); 1930 if (!SimplifiedPtrOp.hasValue()) 1931 return true; 1932 const Value *PtrOpVal = SimplifiedPtrOp.getValue(); 1933 1934 // A memory access through a pointer is considered UB 1935 // only if the pointer has constant null value. 1936 // TODO: Expand it to not only check constant values. 1937 if (!isa<ConstantPointerNull>(PtrOpVal)) { 1938 AssumedNoUBInsts.insert(&I); 1939 return true; 1940 } 1941 const Type *PtrTy = PtrOpVal->getType(); 1942 1943 // Because we only consider instructions inside functions, 1944 // assume that a parent function exists. 1945 const Function *F = I.getFunction(); 1946 1947 // A memory access using constant null pointer is only considered UB 1948 // if null pointer is _not_ defined for the target platform. 1949 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) 1950 AssumedNoUBInsts.insert(&I); 1951 else 1952 KnownUBInsts.insert(&I); 1953 return true; 1954 }; 1955 1956 auto InspectBrInstForUB = [&](Instruction &I) { 1957 // A conditional branch instruction is considered UB if it has `undef` 1958 // condition. 1959 1960 // Skip instructions that are already saved. 1961 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1962 return true; 1963 1964 // We know we have a branch instruction. 1965 auto BrInst = cast<BranchInst>(&I); 1966 1967 // Unconditional branches are never considered UB. 1968 if (BrInst->isUnconditional()) 1969 return true; 1970 1971 // Either we stopped and the appropriate action was taken, 1972 // or we got back a simplified value to continue. 1973 Optional<Value *> SimplifiedCond = 1974 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); 1975 if (!SimplifiedCond.hasValue()) 1976 return true; 1977 AssumedNoUBInsts.insert(&I); 1978 return true; 1979 }; 1980 1981 A.checkForAllInstructions(InspectMemAccessInstForUB, *this, 1982 {Instruction::Load, Instruction::Store, 1983 Instruction::AtomicCmpXchg, 1984 Instruction::AtomicRMW}, 1985 /* CheckBBLivenessOnly */ true); 1986 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br}, 1987 /* CheckBBLivenessOnly */ true); 1988 if (NoUBPrevSize != AssumedNoUBInsts.size() || 1989 UBPrevSize != KnownUBInsts.size()) 1990 return ChangeStatus::CHANGED; 1991 return ChangeStatus::UNCHANGED; 1992 } 1993 1994 bool isKnownToCauseUB(Instruction *I) const override { 1995 return KnownUBInsts.count(I); 1996 } 1997 1998 bool isAssumedToCauseUB(Instruction *I) const override { 1999 // In simple words, if an instruction is not in the assumed to _not_ 2000 // cause UB, then it is assumed UB (that includes those 2001 // in the KnownUBInsts set). 
The rest of the boilerplate
2002 // ensures that the instruction is one of those we test
2003 // for UB.
2004
2005 switch (I->getOpcode()) {
2006 case Instruction::Load:
2007 case Instruction::Store:
2008 case Instruction::AtomicCmpXchg:
2009 case Instruction::AtomicRMW:
2010 return !AssumedNoUBInsts.count(I);
2011 case Instruction::Br: {
2012 auto BrInst = cast<BranchInst>(I);
2013 if (BrInst->isUnconditional())
2014 return false;
2015 return !AssumedNoUBInsts.count(I);
2016 } break;
2017 default:
2018 return false;
2019 }
2020 return false;
2021 }
2022
2023 ChangeStatus manifest(Attributor &A) override {
2024 if (KnownUBInsts.empty())
2025 return ChangeStatus::UNCHANGED;
2026 for (Instruction *I : KnownUBInsts)
2027 A.changeToUnreachableAfterManifest(I);
2028 return ChangeStatus::CHANGED;
2029 }
2030
2031 /// See AbstractAttribute::getAsStr()
2032 const std::string getAsStr() const override {
2033 return getAssumed() ? "undefined-behavior" : "no-ub";
2034 }
2035
2036 /// Note: The correctness of this analysis depends on the fact that the
2037 /// following 2 sets will stop changing after some point.
2038 /// "Change" here means that their size changes.
2039 /// The size of each set is monotonically increasing
2040 /// (we only add items to them) and it is upper bounded by the number of
2041 /// instructions in the processed function (we can never save more
2042 /// elements in either set than this number). Hence, at some point,
2043 /// they will stop increasing.
2044 /// Consequently, at some point, both sets will have stopped
2045 /// changing, effectively making the analysis reach a fixpoint.
2046
2047 /// Note: These 2 sets are disjoint and an instruction can be considered
2048 /// one of 3 things:
2049 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2050 /// the KnownUBInsts set.
2051 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2052 /// has a reason to assume it).
2053 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2054 /// could not find a reason to assume or prove that it can cause UB,
2055 /// hence it assumes it doesn't. We have a set for these instructions
2056 /// so that we don't reprocess them in every update.
2057 /// Note however that instructions in this set may cause UB.
2058
2059 protected:
2060 /// A set of all live instructions _known_ to cause UB.
2061 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2062
2063 private:
2064 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2065 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2066
2067 // Should be called during updates in which, if we're processing an instruction
2068 // \p I that depends on a value \p V, one of the following has to happen:
2069 // - If the value is assumed, then stop.
2070 // - If the value is known but undef, then consider it UB.
2071 // - Otherwise, do specific processing with the simplified value.
2072 // We return None in the first 2 cases to signify that an appropriate
2073 // action was taken and the caller should stop.
2074 // Otherwise, we return the simplified value that the caller should
2075 // use for specific processing.
2076 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2077 Instruction *I) {
2078 const auto &ValueSimplifyAA =
2079 A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2080 Optional<Value *> SimplifiedV =
2081 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2082 if (!ValueSimplifyAA.isKnown()) {
2083 // Don't depend on assumed values.
2084 return llvm::None; 2085 } 2086 if (!SimplifiedV.hasValue()) { 2087 // If it is known (which we tested above) but it doesn't have a value, 2088 // then we can assume `undef` and hence the instruction is UB. 2089 KnownUBInsts.insert(I); 2090 return llvm::None; 2091 } 2092 Value *Val = SimplifiedV.getValue(); 2093 if (isa<UndefValue>(Val)) { 2094 KnownUBInsts.insert(I); 2095 return llvm::None; 2096 } 2097 return Val; 2098 } 2099 }; 2100 2101 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { 2102 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) 2103 : AAUndefinedBehaviorImpl(IRP, A) {} 2104 2105 /// See AbstractAttribute::trackStatistics() 2106 void trackStatistics() const override { 2107 STATS_DECL(UndefinedBehaviorInstruction, Instruction, 2108 "Number of instructions known to have UB"); 2109 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += 2110 KnownUBInsts.size(); 2111 } 2112 }; 2113 2114 /// ------------------------ Will-Return Attributes ---------------------------- 2115 2116 // Helper function that checks whether a function has any cycle which we don't 2117 // know if it is bounded or not. 2118 // Loops with maximum trip count are considered bounded, any other cycle not. 2119 static bool mayContainUnboundedCycle(Function &F, Attributor &A) { 2120 ScalarEvolution *SE = 2121 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); 2122 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); 2123 // If either SCEV or LoopInfo is not available for the function then we assume 2124 // any cycle to be unbounded cycle. 2125 // We use scc_iterator which uses Tarjan algorithm to find all the maximal 2126 // SCCs.To detect if there's a cycle, we only need to find the maximal ones. 2127 if (!SE || !LI) { 2128 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) 2129 if (SCCI.hasCycle()) 2130 return true; 2131 return false; 2132 } 2133 2134 // If there's irreducible control, the function may contain non-loop cycles. 2135 if (mayContainIrreducibleControl(F, LI)) 2136 return true; 2137 2138 // Any loop that does not have a max trip count is considered unbounded cycle. 2139 for (auto *L : LI->getLoopsInPreorder()) { 2140 if (!SE->getSmallConstantMaxTripCount(L)) 2141 return true; 2142 } 2143 return false; 2144 } 2145 2146 struct AAWillReturnImpl : public AAWillReturn { 2147 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2148 : AAWillReturn(IRP, A) {} 2149 2150 /// See AbstractAttribute::initialize(...). 2151 void initialize(Attributor &A) override { 2152 AAWillReturn::initialize(A); 2153 2154 Function *F = getAnchorScope(); 2155 if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A)) 2156 indicatePessimisticFixpoint(); 2157 } 2158 2159 /// See AbstractAttribute::updateImpl(...). 
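/// A function is assumed willreturn if every call-like instruction in it is
/// willreturn, where a merely assumed (not yet known) willreturn callee must
/// also be assumed norecurse; roughly, this guards against cycles of
/// assumed-willreturn calls that lead back into this function.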
2160 ChangeStatus updateImpl(Attributor &A) override { 2161 auto CheckForWillReturn = [&](Instruction &I) { 2162 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2163 const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos); 2164 if (WillReturnAA.isKnownWillReturn()) 2165 return true; 2166 if (!WillReturnAA.isAssumedWillReturn()) 2167 return false; 2168 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos); 2169 return NoRecurseAA.isAssumedNoRecurse(); 2170 }; 2171 2172 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) 2173 return indicatePessimisticFixpoint(); 2174 2175 return ChangeStatus::UNCHANGED; 2176 } 2177 2178 /// See AbstractAttribute::getAsStr() 2179 const std::string getAsStr() const override { 2180 return getAssumed() ? "willreturn" : "may-noreturn"; 2181 } 2182 }; 2183 2184 struct AAWillReturnFunction final : AAWillReturnImpl { 2185 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2186 : AAWillReturnImpl(IRP, A) {} 2187 2188 /// See AbstractAttribute::trackStatistics() 2189 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2190 }; 2191 2192 /// WillReturn attribute deduction for a call sites. 2193 struct AAWillReturnCallSite final : AAWillReturnImpl { 2194 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2195 : AAWillReturnImpl(IRP, A) {} 2196 2197 /// See AbstractAttribute::initialize(...). 2198 void initialize(Attributor &A) override { 2199 AAWillReturnImpl::initialize(A); 2200 Function *F = getAssociatedFunction(); 2201 if (!F) 2202 indicatePessimisticFixpoint(); 2203 } 2204 2205 /// See AbstractAttribute::updateImpl(...). 2206 ChangeStatus updateImpl(Attributor &A) override { 2207 // TODO: Once we have call site specific value information we can provide 2208 // call site specific liveness information and then it makes 2209 // sense to specialize attributes for call sites arguments instead of 2210 // redirecting requests to the callee argument. 2211 Function *F = getAssociatedFunction(); 2212 const IRPosition &FnPos = IRPosition::function(*F); 2213 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); 2214 return clampStateAndIndicateChange( 2215 getState(), 2216 static_cast<const AAWillReturn::StateType &>(FnAA.getState())); 2217 } 2218 2219 /// See AbstractAttribute::trackStatistics() 2220 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2221 }; 2222 2223 /// -------------------AAReachability Attribute-------------------------- 2224 2225 struct AAReachabilityImpl : AAReachability { 2226 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2227 : AAReachability(IRP, A) {} 2228 2229 const std::string getAsStr() const override { 2230 // TODO: Return the number of reachable queries. 2231 return "reachable"; 2232 } 2233 2234 /// See AbstractAttribute::initialize(...). 2235 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } 2236 2237 /// See AbstractAttribute::updateImpl(...). 
2238 ChangeStatus updateImpl(Attributor &A) override { 2239 return indicatePessimisticFixpoint(); 2240 } 2241 }; 2242 2243 struct AAReachabilityFunction final : public AAReachabilityImpl { 2244 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2245 : AAReachabilityImpl(IRP, A) {} 2246 2247 /// See AbstractAttribute::trackStatistics() 2248 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2249 }; 2250 2251 /// ------------------------ NoAlias Argument Attribute ------------------------ 2252 2253 struct AANoAliasImpl : AANoAlias { 2254 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2255 assert(getAssociatedType()->isPointerTy() && 2256 "Noalias is a pointer attribute"); 2257 } 2258 2259 const std::string getAsStr() const override { 2260 return getAssumed() ? "noalias" : "may-alias"; 2261 } 2262 }; 2263 2264 /// NoAlias attribute for a floating value. 2265 struct AANoAliasFloating final : AANoAliasImpl { 2266 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 2267 : AANoAliasImpl(IRP, A) {} 2268 2269 /// See AbstractAttribute::initialize(...). 2270 void initialize(Attributor &A) override { 2271 AANoAliasImpl::initialize(A); 2272 Value *Val = &getAssociatedValue(); 2273 do { 2274 CastInst *CI = dyn_cast<CastInst>(Val); 2275 if (!CI) 2276 break; 2277 Value *Base = CI->getOperand(0); 2278 if (!Base->hasOneUse()) 2279 break; 2280 Val = Base; 2281 } while (true); 2282 2283 if (!Val->getType()->isPointerTy()) { 2284 indicatePessimisticFixpoint(); 2285 return; 2286 } 2287 2288 if (isa<AllocaInst>(Val)) 2289 indicateOptimisticFixpoint(); 2290 else if (isa<ConstantPointerNull>(Val) && 2291 !NullPointerIsDefined(getAnchorScope(), 2292 Val->getType()->getPointerAddressSpace())) 2293 indicateOptimisticFixpoint(); 2294 else if (Val != &getAssociatedValue()) { 2295 const auto &ValNoAliasAA = 2296 A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); 2297 if (ValNoAliasAA.isKnownNoAlias()) 2298 indicateOptimisticFixpoint(); 2299 } 2300 } 2301 2302 /// See AbstractAttribute::updateImpl(...). 2303 ChangeStatus updateImpl(Attributor &A) override { 2304 // TODO: Implement this. 2305 return indicatePessimisticFixpoint(); 2306 } 2307 2308 /// See AbstractAttribute::trackStatistics() 2309 void trackStatistics() const override { 2310 STATS_DECLTRACK_FLOATING_ATTR(noalias) 2311 } 2312 }; 2313 2314 /// NoAlias attribute for an argument. 2315 struct AANoAliasArgument final 2316 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 2317 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 2318 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 2319 2320 /// See AbstractAttribute::initialize(...). 2321 void initialize(Attributor &A) override { 2322 Base::initialize(A); 2323 // See callsite argument attribute and callee argument attribute. 2324 if (hasAttr({Attribute::ByVal})) 2325 indicateOptimisticFixpoint(); 2326 } 2327 2328 /// See AbstractAttribute::update(...). 2329 ChangeStatus updateImpl(Attributor &A) override { 2330 // We have to make sure no-alias on the argument does not break 2331 // synchronization when this is a callback argument, see also [1] below. 2332 // If synchronization cannot be affected, we delegate to the base updateImpl 2333 // function, otherwise we give up for now. 2334 2335 // If the function is no-sync, no-alias cannot break synchronization. 
2336 const auto &NoSyncAA = A.getAAFor<AANoSync>( 2337 *this, IRPosition::function_scope(getIRPosition())); 2338 if (NoSyncAA.isAssumedNoSync()) 2339 return Base::updateImpl(A); 2340 2341 // If the argument is read-only, no-alias cannot break synchronization. 2342 const auto &MemBehaviorAA = 2343 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 2344 if (MemBehaviorAA.isAssumedReadOnly()) 2345 return Base::updateImpl(A); 2346 2347 // If the argument is never passed through callbacks, no-alias cannot break 2348 // synchronization. 2349 bool AllCallSitesKnown; 2350 if (A.checkForAllCallSites( 2351 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 2352 true, AllCallSitesKnown)) 2353 return Base::updateImpl(A); 2354 2355 // TODO: add no-alias but make sure it doesn't break synchronization by 2356 // introducing fake uses. See: 2357 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, 2358 // International Workshop on OpenMP 2018, 2359 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 2360 2361 return indicatePessimisticFixpoint(); 2362 } 2363 2364 /// See AbstractAttribute::trackStatistics() 2365 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 2366 }; 2367 2368 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 2369 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 2370 : AANoAliasImpl(IRP, A) {} 2371 2372 /// See AbstractAttribute::initialize(...). 2373 void initialize(Attributor &A) override { 2374 // See callsite argument attribute and callee argument attribute. 2375 const auto &CB = cast<CallBase>(getAnchorValue()); 2376 if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias)) 2377 indicateOptimisticFixpoint(); 2378 Value &Val = getAssociatedValue(); 2379 if (isa<ConstantPointerNull>(Val) && 2380 !NullPointerIsDefined(getAnchorScope(), 2381 Val.getType()->getPointerAddressSpace())) 2382 indicateOptimisticFixpoint(); 2383 } 2384 2385 /// Determine if the underlying value may alias with the call site argument 2386 /// \p OtherArgNo of \p ICS (= the underlying call site). 2387 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 2388 const AAMemoryBehavior &MemBehaviorAA, 2389 const CallBase &CB, unsigned OtherArgNo) { 2390 // We do not need to worry about aliasing with the underlying IRP. 2391 if (this->getArgNo() == (int)OtherArgNo) 2392 return false; 2393 2394 // If it is not a pointer or pointer vector we do not alias. 2395 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 2396 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 2397 return false; 2398 2399 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 2400 *this, IRPosition::callsite_argument(CB, OtherArgNo), 2401 /* TrackDependence */ false); 2402 2403 // If the argument is readnone, there is no read-write aliasing. 2404 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 2405 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2406 return false; 2407 } 2408 2409 // If the argument is readonly and the underlying value is readonly, there 2410 // is no read-write aliasing. 2411 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 2412 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 2413 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2414 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2415 return false; 2416 } 2417 2418 // We have to utilize actual alias analysis queries so we need the object. 
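// The AAResults object is fetched lazily below and cached in the
// caller-provided pointer, so checks against the other arguments of the same
// call site reuse a single query object.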
2419 if (!AAR) 2420 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 2421 2422 // Try to rule it out at the call site. 2423 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 2424 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 2425 "callsite arguments: " 2426 << getAssociatedValue() << " " << *ArgOp << " => " 2427 << (IsAliasing ? "" : "no-") << "alias \n"); 2428 2429 return IsAliasing; 2430 } 2431 2432 bool 2433 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 2434 const AAMemoryBehavior &MemBehaviorAA, 2435 const AANoAlias &NoAliasAA) { 2436 // We can deduce "noalias" if the following conditions hold. 2437 // (i) Associated value is assumed to be noalias in the definition. 2438 // (ii) Associated value is assumed to be no-capture in all the uses 2439 // possibly executed before this callsite. 2440 // (iii) There is no other pointer argument which could alias with the 2441 // value. 2442 2443 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 2444 if (!AssociatedValueIsNoAliasAtDef) { 2445 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 2446 << " is not no-alias at the definition\n"); 2447 return false; 2448 } 2449 2450 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 2451 2452 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2453 auto &NoCaptureAA = 2454 A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false); 2455 // Check whether the value is captured in the scope using AANoCapture. 2456 // Look at CFG and check only uses possibly executed before this 2457 // callsite. 2458 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 2459 Instruction *UserI = cast<Instruction>(U.getUser()); 2460 2461 // If user if curr instr and only use. 2462 if (UserI == getCtxI() && UserI->hasOneUse()) 2463 return true; 2464 2465 const Function *ScopeFn = VIRP.getAnchorScope(); 2466 if (ScopeFn) { 2467 const auto &ReachabilityAA = 2468 A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); 2469 2470 if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI())) 2471 return true; 2472 2473 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2474 if (CB->isArgOperand(&U)) { 2475 2476 unsigned ArgNo = CB->getArgOperandNo(&U); 2477 2478 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 2479 *this, IRPosition::callsite_argument(*CB, ArgNo)); 2480 2481 if (NoCaptureAA.isAssumedNoCapture()) 2482 return true; 2483 } 2484 } 2485 } 2486 2487 // For cases which can potentially have more users 2488 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 2489 isa<SelectInst>(U)) { 2490 Follow = true; 2491 return true; 2492 } 2493 2494 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 2495 return false; 2496 }; 2497 2498 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 2499 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 2500 LLVM_DEBUG( 2501 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 2502 << " cannot be noalias as it is potentially captured\n"); 2503 return false; 2504 } 2505 } 2506 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 2507 2508 // Check there is no other pointer argument which could alias with the 2509 // value passed at this call site. 
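// For example (illustrative IR), in `call void @f(i8* %p, i8* %p)` the same
// pointer is passed twice; unless one of the memory-behavior checks above
// rules out a read-write conflict, the loop below will treat the two operands
// as may-alias and noalias will not be deduced.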
2510 // TODO: AbstractCallSite 2511 const auto &CB = cast<CallBase>(getAnchorValue()); 2512 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); 2513 OtherArgNo++) 2514 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 2515 return false; 2516 2517 return true; 2518 } 2519 2520 /// See AbstractAttribute::updateImpl(...). 2521 ChangeStatus updateImpl(Attributor &A) override { 2522 // If the argument is readnone we are done as there are no accesses via the 2523 // argument. 2524 auto &MemBehaviorAA = 2525 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 2526 /* TrackDependence */ false); 2527 if (MemBehaviorAA.isAssumedReadNone()) { 2528 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2529 return ChangeStatus::UNCHANGED; 2530 } 2531 2532 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2533 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, 2534 /* TrackDependence */ false); 2535 2536 AAResults *AAR = nullptr; 2537 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 2538 NoAliasAA)) { 2539 LLVM_DEBUG( 2540 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 2541 return ChangeStatus::UNCHANGED; 2542 } 2543 2544 return indicatePessimisticFixpoint(); 2545 } 2546 2547 /// See AbstractAttribute::trackStatistics() 2548 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 2549 }; 2550 2551 /// NoAlias attribute for function return value. 2552 struct AANoAliasReturned final : AANoAliasImpl { 2553 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 2554 : AANoAliasImpl(IRP, A) {} 2555 2556 /// See AbstractAttribute::updateImpl(...). 2557 virtual ChangeStatus updateImpl(Attributor &A) override { 2558 2559 auto CheckReturnValue = [&](Value &RV) -> bool { 2560 if (Constant *C = dyn_cast<Constant>(&RV)) 2561 if (C->isNullValue() || isa<UndefValue>(C)) 2562 return true; 2563 2564 /// For now, we can only deduce noalias if we have call sites. 2565 /// FIXME: add more support. 2566 if (!isa<CallBase>(&RV)) 2567 return false; 2568 2569 const IRPosition &RVPos = IRPosition::value(RV); 2570 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); 2571 if (!NoAliasAA.isAssumedNoAlias()) 2572 return false; 2573 2574 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); 2575 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 2576 }; 2577 2578 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 2579 return indicatePessimisticFixpoint(); 2580 2581 return ChangeStatus::UNCHANGED; 2582 } 2583 2584 /// See AbstractAttribute::trackStatistics() 2585 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 2586 }; 2587 2588 /// NoAlias attribute deduction for a call site return value. 2589 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 2590 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 2591 : AANoAliasImpl(IRP, A) {} 2592 2593 /// See AbstractAttribute::initialize(...). 2594 void initialize(Attributor &A) override { 2595 AANoAliasImpl::initialize(A); 2596 Function *F = getAssociatedFunction(); 2597 if (!F) 2598 indicatePessimisticFixpoint(); 2599 } 2600 2601 /// See AbstractAttribute::updateImpl(...). 
2602 ChangeStatus updateImpl(Attributor &A) override { 2603 // TODO: Once we have call site specific value information we can provide 2604 // call site specific liveness information and then it makes 2605 // sense to specialize attributes for call sites arguments instead of 2606 // redirecting requests to the callee argument. 2607 Function *F = getAssociatedFunction(); 2608 const IRPosition &FnPos = IRPosition::returned(*F); 2609 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); 2610 return clampStateAndIndicateChange( 2611 getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState())); 2612 } 2613 2614 /// See AbstractAttribute::trackStatistics() 2615 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 2616 }; 2617 2618 /// -------------------AAIsDead Function Attribute----------------------- 2619 2620 struct AAIsDeadValueImpl : public AAIsDead { 2621 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2622 2623 /// See AAIsDead::isAssumedDead(). 2624 bool isAssumedDead() const override { return getAssumed(); } 2625 2626 /// See AAIsDead::isKnownDead(). 2627 bool isKnownDead() const override { return getKnown(); } 2628 2629 /// See AAIsDead::isAssumedDead(BasicBlock *). 2630 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 2631 2632 /// See AAIsDead::isKnownDead(BasicBlock *). 2633 bool isKnownDead(const BasicBlock *BB) const override { return false; } 2634 2635 /// See AAIsDead::isAssumedDead(Instruction *I). 2636 bool isAssumedDead(const Instruction *I) const override { 2637 return I == getCtxI() && isAssumedDead(); 2638 } 2639 2640 /// See AAIsDead::isKnownDead(Instruction *I). 2641 bool isKnownDead(const Instruction *I) const override { 2642 return isAssumedDead(I) && getKnown(); 2643 } 2644 2645 /// See AbstractAttribute::getAsStr(). 2646 const std::string getAsStr() const override { 2647 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 2648 } 2649 2650 /// Check if all uses are assumed dead. 2651 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 2652 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 2653 // Explicitly set the dependence class to required because we want a long 2654 // chain of N dependent instructions to be considered live as soon as one is 2655 // without going through N update cycles. This is not required for 2656 // correctness. 2657 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); 2658 } 2659 2660 /// Determine if \p I is assumed to be side-effect free. 
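/// Roughly: trivially dead instructions qualify, and so do non-intrinsic
/// calls that are assumed nounwind and at least readonly; everything else is
/// conservatively treated as having side effects.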
2661 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 2662 if (!I || wouldInstructionBeTriviallyDead(I)) 2663 return true; 2664 2665 auto *CB = dyn_cast<CallBase>(I); 2666 if (!CB || isa<IntrinsicInst>(CB)) 2667 return false; 2668 2669 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 2670 const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( 2671 *this, CallIRP, /* TrackDependence */ false); 2672 if (!NoUnwindAA.isAssumedNoUnwind()) 2673 return false; 2674 if (!NoUnwindAA.isKnownNoUnwind()) 2675 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 2676 2677 const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( 2678 *this, CallIRP, /* TrackDependence */ false); 2679 if (MemBehaviorAA.isAssumedReadOnly()) { 2680 if (!MemBehaviorAA.isKnownReadOnly()) 2681 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2682 return true; 2683 } 2684 return false; 2685 } 2686 }; 2687 2688 struct AAIsDeadFloating : public AAIsDeadValueImpl { 2689 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 2690 : AAIsDeadValueImpl(IRP, A) {} 2691 2692 /// See AbstractAttribute::initialize(...). 2693 void initialize(Attributor &A) override { 2694 if (isa<UndefValue>(getAssociatedValue())) { 2695 indicatePessimisticFixpoint(); 2696 return; 2697 } 2698 2699 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2700 if (!isAssumedSideEffectFree(A, I)) 2701 indicatePessimisticFixpoint(); 2702 } 2703 2704 /// See AbstractAttribute::updateImpl(...). 2705 ChangeStatus updateImpl(Attributor &A) override { 2706 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2707 if (!isAssumedSideEffectFree(A, I)) 2708 return indicatePessimisticFixpoint(); 2709 2710 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2711 return indicatePessimisticFixpoint(); 2712 return ChangeStatus::UNCHANGED; 2713 } 2714 2715 /// See AbstractAttribute::manifest(...). 2716 ChangeStatus manifest(Attributor &A) override { 2717 Value &V = getAssociatedValue(); 2718 if (auto *I = dyn_cast<Instruction>(&V)) { 2719 // If we get here we basically know the users are all dead. We check if 2720 // isAssumedSideEffectFree returns true here again because it might not be 2721 // the case and only the users are dead but the instruction (=call) is 2722 // still needed. 2723 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { 2724 A.deleteAfterManifest(*I); 2725 return ChangeStatus::CHANGED; 2726 } 2727 } 2728 if (V.use_empty()) 2729 return ChangeStatus::UNCHANGED; 2730 2731 bool UsedAssumedInformation = false; 2732 Optional<Constant *> C = 2733 A.getAssumedConstant(V, *this, UsedAssumedInformation); 2734 if (C.hasValue() && C.getValue()) 2735 return ChangeStatus::UNCHANGED; 2736 2737 // Replace the value with undef as it is dead but keep droppable uses around 2738 // as they provide information we don't want to give up on just yet. 2739 UndefValue &UV = *UndefValue::get(V.getType()); 2740 bool AnyChange = 2741 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); 2742 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2743 } 2744 2745 /// See AbstractAttribute::trackStatistics() 2746 void trackStatistics() const override { 2747 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 2748 } 2749 }; 2750 2751 struct AAIsDeadArgument : public AAIsDeadFloating { 2752 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 2753 : AAIsDeadFloating(IRP, A) {} 2754 2755 /// See AbstractAttribute::initialize(...). 
2756 void initialize(Attributor &A) override { 2757 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 2758 indicatePessimisticFixpoint(); 2759 } 2760 2761 /// See AbstractAttribute::manifest(...). 2762 ChangeStatus manifest(Attributor &A) override { 2763 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 2764 Argument &Arg = *getAssociatedArgument(); 2765 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 2766 if (A.registerFunctionSignatureRewrite( 2767 Arg, /* ReplacementTypes */ {}, 2768 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 2769 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 2770 Arg.dropDroppableUses(); 2771 return ChangeStatus::CHANGED; 2772 } 2773 return Changed; 2774 } 2775 2776 /// See AbstractAttribute::trackStatistics() 2777 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 2778 }; 2779 2780 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 2781 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 2782 : AAIsDeadValueImpl(IRP, A) {} 2783 2784 /// See AbstractAttribute::initialize(...). 2785 void initialize(Attributor &A) override { 2786 if (isa<UndefValue>(getAssociatedValue())) 2787 indicatePessimisticFixpoint(); 2788 } 2789 2790 /// See AbstractAttribute::updateImpl(...). 2791 ChangeStatus updateImpl(Attributor &A) override { 2792 // TODO: Once we have call site specific value information we can provide 2793 // call site specific liveness information and then it makes 2794 // sense to specialize attributes for call sites arguments instead of 2795 // redirecting requests to the callee argument. 2796 Argument *Arg = getAssociatedArgument(); 2797 if (!Arg) 2798 return indicatePessimisticFixpoint(); 2799 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2800 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); 2801 return clampStateAndIndicateChange( 2802 getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState())); 2803 } 2804 2805 /// See AbstractAttribute::manifest(...). 2806 ChangeStatus manifest(Attributor &A) override { 2807 CallBase &CB = cast<CallBase>(getAnchorValue()); 2808 Use &U = CB.getArgOperandUse(getArgNo()); 2809 assert(!isa<UndefValue>(U.get()) && 2810 "Expected undef values to be filtered out!"); 2811 UndefValue &UV = *UndefValue::get(U->getType()); 2812 if (A.changeUseAfterManifest(U, UV)) 2813 return ChangeStatus::CHANGED; 2814 return ChangeStatus::UNCHANGED; 2815 } 2816 2817 /// See AbstractAttribute::trackStatistics() 2818 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 2819 }; 2820 2821 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 2822 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 2823 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 2824 2825 /// See AAIsDead::isAssumedDead(). 2826 bool isAssumedDead() const override { 2827 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 2828 } 2829 2830 /// See AbstractAttribute::initialize(...). 2831 void initialize(Attributor &A) override { 2832 if (isa<UndefValue>(getAssociatedValue())) { 2833 indicatePessimisticFixpoint(); 2834 return; 2835 } 2836 2837 // We track this separately as a secondary state. 2838 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 2839 } 2840 2841 /// See AbstractAttribute::updateImpl(...). 
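/// Side-effect freeness of the call is tracked as a secondary bit here: a
/// call whose return value has no live users can still be required for its
/// effects, in which case only the "unused result" statistic applies and the
/// call itself is not considered dead.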
2842 ChangeStatus updateImpl(Attributor &A) override { 2843 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2844 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 2845 IsAssumedSideEffectFree = false; 2846 Changed = ChangeStatus::CHANGED; 2847 } 2848 2849 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2850 return indicatePessimisticFixpoint(); 2851 return Changed; 2852 } 2853 2854 /// See AbstractAttribute::trackStatistics() 2855 void trackStatistics() const override { 2856 if (IsAssumedSideEffectFree) 2857 STATS_DECLTRACK_CSRET_ATTR(IsDead) 2858 else 2859 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 2860 } 2861 2862 /// See AbstractAttribute::getAsStr(). 2863 const std::string getAsStr() const override { 2864 return isAssumedDead() 2865 ? "assumed-dead" 2866 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 2867 } 2868 2869 private: 2870 bool IsAssumedSideEffectFree; 2871 }; 2872 2873 struct AAIsDeadReturned : public AAIsDeadValueImpl { 2874 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 2875 : AAIsDeadValueImpl(IRP, A) {} 2876 2877 /// See AbstractAttribute::updateImpl(...). 2878 ChangeStatus updateImpl(Attributor &A) override { 2879 2880 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 2881 {Instruction::Ret}); 2882 2883 auto PredForCallSite = [&](AbstractCallSite ACS) { 2884 if (ACS.isCallbackCall() || !ACS.getInstruction()) 2885 return false; 2886 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 2887 }; 2888 2889 bool AllCallSitesKnown; 2890 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 2891 AllCallSitesKnown)) 2892 return indicatePessimisticFixpoint(); 2893 2894 return ChangeStatus::UNCHANGED; 2895 } 2896 2897 /// See AbstractAttribute::manifest(...). 2898 ChangeStatus manifest(Attributor &A) override { 2899 // TODO: Rewrite the signature to return void? 2900 bool AnyChange = false; 2901 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 2902 auto RetInstPred = [&](Instruction &I) { 2903 ReturnInst &RI = cast<ReturnInst>(I); 2904 if (!isa<UndefValue>(RI.getReturnValue())) 2905 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 2906 return true; 2907 }; 2908 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); 2909 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2910 } 2911 2912 /// See AbstractAttribute::trackStatistics() 2913 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 2914 }; 2915 2916 struct AAIsDeadFunction : public AAIsDead { 2917 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2918 2919 /// See AbstractAttribute::initialize(...). 2920 void initialize(Attributor &A) override { 2921 const Function *F = getAnchorScope(); 2922 if (F && !F->isDeclaration()) { 2923 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 2924 assumeLive(A, F->getEntryBlock()); 2925 } 2926 } 2927 2928 /// See AbstractAttribute::getAsStr(). 2929 const std::string getAsStr() const override { 2930 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 2931 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 2932 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 2933 std::to_string(KnownDeadEnds.size()) + "]"; 2934 } 2935 2936 /// See AbstractAttribute::manifest(...). 
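/// Manifesting liveness information roughly proceeds in three steps: if no
/// block was ever assumed live, the function is deleted; known dead ends
/// (e.g., calls assumed noreturn) are cut off by inserting unreachable or by
/// registering invokes with dead normal successors; and finally all blocks
/// that were never assumed live are removed.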
2937 ChangeStatus manifest(Attributor &A) override { 2938 assert(getState().isValidState() && 2939 "Attempted to manifest an invalid state!"); 2940 2941 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 2942 Function &F = *getAnchorScope(); 2943 2944 if (AssumedLiveBlocks.empty()) { 2945 A.deleteAfterManifest(F); 2946 return ChangeStatus::CHANGED; 2947 } 2948 2949 // Flag to determine if we can change an invoke to a call assuming the 2950 // callee is nounwind. This is not possible if the personality of the 2951 // function allows to catch asynchronous exceptions. 2952 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 2953 2954 KnownDeadEnds.set_union(ToBeExploredFrom); 2955 for (const Instruction *DeadEndI : KnownDeadEnds) { 2956 auto *CB = dyn_cast<CallBase>(DeadEndI); 2957 if (!CB) 2958 continue; 2959 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 2960 *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, 2961 DepClassTy::OPTIONAL); 2962 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 2963 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 2964 continue; 2965 2966 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 2967 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 2968 else 2969 A.changeToUnreachableAfterManifest( 2970 const_cast<Instruction *>(DeadEndI->getNextNode())); 2971 HasChanged = ChangeStatus::CHANGED; 2972 } 2973 2974 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 2975 for (BasicBlock &BB : F) 2976 if (!AssumedLiveBlocks.count(&BB)) { 2977 A.deleteAfterManifest(BB); 2978 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 2979 } 2980 2981 return HasChanged; 2982 } 2983 2984 /// See AbstractAttribute::updateImpl(...). 2985 ChangeStatus updateImpl(Attributor &A) override; 2986 2987 /// See AbstractAttribute::trackStatistics() 2988 void trackStatistics() const override {} 2989 2990 /// Returns true if the function is assumed dead. 2991 bool isAssumedDead() const override { return false; } 2992 2993 /// See AAIsDead::isKnownDead(). 2994 bool isKnownDead() const override { return false; } 2995 2996 /// See AAIsDead::isAssumedDead(BasicBlock *). 2997 bool isAssumedDead(const BasicBlock *BB) const override { 2998 assert(BB->getParent() == getAnchorScope() && 2999 "BB must be in the same anchor scope function."); 3000 3001 if (!getAssumed()) 3002 return false; 3003 return !AssumedLiveBlocks.count(BB); 3004 } 3005 3006 /// See AAIsDead::isKnownDead(BasicBlock *). 3007 bool isKnownDead(const BasicBlock *BB) const override { 3008 return getKnown() && isAssumedDead(BB); 3009 } 3010 3011 /// See AAIsDead::isAssumed(Instruction *I). 3012 bool isAssumedDead(const Instruction *I) const override { 3013 assert(I->getParent()->getParent() == getAnchorScope() && 3014 "Instruction must be in the same anchor scope function."); 3015 3016 if (!getAssumed()) 3017 return false; 3018 3019 // If it is not in AssumedLiveBlocks then it for sure dead. 3020 // Otherwise, it can still be after noreturn call in a live block. 3021 if (!AssumedLiveBlocks.count(I->getParent())) 3022 return true; 3023 3024 // If it is not after a liveness barrier it is live. 3025 const Instruction *PrevI = I->getPrevNode(); 3026 while (PrevI) { 3027 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3028 return true; 3029 PrevI = PrevI->getPrevNode(); 3030 } 3031 return false; 3032 } 3033 3034 /// See AAIsDead::isKnownDead(Instruction *I). 
3035 bool isKnownDead(const Instruction *I) const override { 3036 return getKnown() && isAssumedDead(I); 3037 } 3038 3039 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3040 /// that internal function called from \p BB should now be looked at. 3041 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3042 if (!AssumedLiveBlocks.insert(&BB).second) 3043 return false; 3044 3045 // We assume that all of BB is (probably) live now and if there are calls to 3046 // internal functions we will assume that those are now live as well. This 3047 // is a performance optimization for blocks with calls to a lot of internal 3048 // functions. It can however cause dead functions to be treated as live. 3049 for (const Instruction &I : BB) 3050 if (const auto *CB = dyn_cast<CallBase>(&I)) 3051 if (const Function *F = CB->getCalledFunction()) 3052 if (F->hasLocalLinkage()) 3053 A.markLiveInternalFunction(*F); 3054 return true; 3055 } 3056 3057 /// Collection of instructions that need to be explored again, e.g., we 3058 /// did assume they do not transfer control to (one of their) successors. 3059 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3060 3061 /// Collection of instructions that are known to not transfer control. 3062 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3063 3064 /// Collection of all assumed live BasicBlocks. 3065 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3066 }; 3067 3068 static bool 3069 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3070 AbstractAttribute &AA, 3071 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3072 const IRPosition &IPos = IRPosition::callsite_function(CB); 3073 3074 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3075 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3076 if (NoReturnAA.isAssumedNoReturn()) 3077 return !NoReturnAA.isKnownNoReturn(); 3078 if (CB.isTerminator()) 3079 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3080 else 3081 AliveSuccessors.push_back(CB.getNextNode()); 3082 return false; 3083 } 3084 3085 static bool 3086 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3087 AbstractAttribute &AA, 3088 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3089 bool UsedAssumedInformation = 3090 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3091 3092 // First, determine if we can change an invoke to a call assuming the 3093 // callee is nounwind. This is not possible if the personality of the 3094 // function allows to catch asynchronous exceptions. 
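// If asynchronous exceptions may be caught, the unwind destination has to be
// kept alive regardless of what we know about the callee; only otherwise can
// a nounwind callee let us treat the unwind edge as dead.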
3095 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3096 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3097 } else { 3098 const IRPosition &IPos = IRPosition::callsite_function(II); 3099 const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( 3100 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3101 if (AANoUnw.isAssumedNoUnwind()) { 3102 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3103 } else { 3104 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3105 } 3106 } 3107 return UsedAssumedInformation; 3108 } 3109 3110 static bool 3111 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3112 AbstractAttribute &AA, 3113 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3114 bool UsedAssumedInformation = false; 3115 if (BI.getNumSuccessors() == 1) { 3116 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3117 } else { 3118 Optional<ConstantInt *> CI = getAssumedConstantInt( 3119 A, *BI.getCondition(), AA, UsedAssumedInformation); 3120 if (!CI.hasValue()) { 3121 // No value yet, assume both edges are dead. 3122 } else if (CI.getValue()) { 3123 const BasicBlock *SuccBB = 3124 BI.getSuccessor(1 - CI.getValue()->getZExtValue()); 3125 AliveSuccessors.push_back(&SuccBB->front()); 3126 } else { 3127 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3128 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3129 UsedAssumedInformation = false; 3130 } 3131 } 3132 return UsedAssumedInformation; 3133 } 3134 3135 static bool 3136 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3137 AbstractAttribute &AA, 3138 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3139 bool UsedAssumedInformation = false; 3140 Optional<ConstantInt *> CI = 3141 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); 3142 if (!CI.hasValue()) { 3143 // No value yet, assume all edges are dead. 3144 } else if (CI.getValue()) { 3145 for (auto &CaseIt : SI.cases()) { 3146 if (CaseIt.getCaseValue() == CI.getValue()) { 3147 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3148 return UsedAssumedInformation; 3149 } 3150 } 3151 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3152 return UsedAssumedInformation; 3153 } else { 3154 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3155 AliveSuccessors.push_back(&SuccBB->front()); 3156 } 3157 return UsedAssumedInformation; 3158 } 3159 3160 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3161 ChangeStatus Change = ChangeStatus::UNCHANGED; 3162 3163 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3164 << getAnchorScope()->size() << "] BBs and " 3165 << ToBeExploredFrom.size() << " exploration points and " 3166 << KnownDeadEnds.size() << " known dead ends\n"); 3167 3168 // Copy and clear the list of instructions we need to explore from. It is 3169 // refilled with instructions the next update has to look at. 3170 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3171 ToBeExploredFrom.end()); 3172 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3173 3174 SmallVector<const Instruction *, 8> AliveSuccessors; 3175 while (!Worklist.empty()) { 3176 const Instruction *I = Worklist.pop_back_val(); 3177 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3178 3179 AliveSuccessors.clear(); 3180 3181 bool UsedAssumedInformation = false; 3182 switch (I->getOpcode()) { 3183 // TODO: look for (assumed) UB to backwards propagate "deadness". 
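    // Opcodes without special handling conservatively keep all successors (or
    // the next instruction for non-terminators) alive.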
    default:
      if (I->isTerminator()) {
        for (const BasicBlock *SuccBB : successors(I->getParent()))
          AliveSuccessors.push_back(&SuccBB->front());
      } else {
        AliveSuccessors.push_back(I->getNextNode());
      }
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have discovered any non-trivial dead end, and (3) not have ruled
  // unreachable code dead.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for call sites.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
3268 ChangeStatus updateImpl(Attributor &A) override { 3269 return indicatePessimisticFixpoint(); 3270 } 3271 3272 /// See AbstractAttribute::trackStatistics() 3273 void trackStatistics() const override {} 3274 }; 3275 3276 /// -------------------- Dereferenceable Argument Attribute -------------------- 3277 3278 template <> 3279 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 3280 const DerefState &R) { 3281 ChangeStatus CS0 = 3282 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 3283 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 3284 return CS0 | CS1; 3285 } 3286 3287 struct AADereferenceableImpl : AADereferenceable { 3288 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 3289 : AADereferenceable(IRP, A) {} 3290 using StateType = DerefState; 3291 3292 /// See AbstractAttribute::initialize(...). 3293 void initialize(Attributor &A) override { 3294 SmallVector<Attribute, 4> Attrs; 3295 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 3296 Attrs, /* IgnoreSubsumingPositions */ false, &A); 3297 for (const Attribute &Attr : Attrs) 3298 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 3299 3300 const IRPosition &IRP = this->getIRPosition(); 3301 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, 3302 /* TrackDependence */ false); 3303 3304 bool CanBeNull; 3305 takeKnownDerefBytesMaximum( 3306 IRP.getAssociatedValue().getPointerDereferenceableBytes( 3307 A.getDataLayout(), CanBeNull)); 3308 3309 bool IsFnInterface = IRP.isFnInterfaceKind(); 3310 Function *FnScope = IRP.getAnchorScope(); 3311 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 3312 indicatePessimisticFixpoint(); 3313 return; 3314 } 3315 3316 if (Instruction *CtxI = getCtxI()) 3317 followUsesInMBEC(*this, A, getState(), *CtxI); 3318 } 3319 3320 /// See AbstractAttribute::getState() 3321 /// { 3322 StateType &getState() override { return *this; } 3323 const StateType &getState() const override { return *this; } 3324 /// } 3325 3326 /// Helper function for collecting accessed bytes in must-be-executed-context 3327 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 3328 DerefState &State) { 3329 const Value *UseV = U->get(); 3330 if (!UseV->getType()->isPointerTy()) 3331 return; 3332 3333 Type *PtrTy = UseV->getType(); 3334 const DataLayout &DL = A.getDataLayout(); 3335 int64_t Offset; 3336 if (const Value *Base = getBasePointerOfAccessPointerOperand( 3337 I, Offset, DL, /*AllowNonInbounds*/ true)) { 3338 if (Base == &getAssociatedValue() && 3339 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 3340 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 3341 State.addAccessedBytes(Offset, Size); 3342 } 3343 } 3344 return; 3345 } 3346 3347 /// See followUsesInMBEC 3348 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3349 AADereferenceable::StateType &State) { 3350 bool IsNonNull = false; 3351 bool TrackUse = false; 3352 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 3353 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 3354 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 3355 << " for instruction " << *I << "\n"); 3356 3357 addAccessedBytesForUse(A, U, I, State); 3358 State.takeKnownDerefBytesMaximum(DerefBytes); 3359 return TrackUse; 3360 } 3361 3362 /// See AbstractAttribute::manifest(...). 
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
                            bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped, IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
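          // Illustrative example (assumed numbers): if dereferenceable(16) is
          // known for the base and the circular chain adds a +8 offset, plain
          // iteration would shrink the assumed bytes 16 -> 8 -> 0 over several
          // updates; taking the pessimistic fixpoint below short-circuits that
          // slow descent.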
3450 T.indicatePessimisticFixpoint(); 3451 } 3452 } 3453 3454 return T.isValidState(); 3455 }; 3456 3457 DerefState T; 3458 if (!genericValueTraversal<AADereferenceable, DerefState>( 3459 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 3460 return indicatePessimisticFixpoint(); 3461 3462 return clampStateAndIndicateChange(getState(), T); 3463 } 3464 3465 /// See AbstractAttribute::trackStatistics() 3466 void trackStatistics() const override { 3467 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 3468 } 3469 }; 3470 3471 /// Dereferenceable attribute for a return value. 3472 struct AADereferenceableReturned final 3473 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 3474 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 3475 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 3476 IRP, A) {} 3477 3478 /// See AbstractAttribute::trackStatistics() 3479 void trackStatistics() const override { 3480 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 3481 } 3482 }; 3483 3484 /// Dereferenceable attribute for an argument 3485 struct AADereferenceableArgument final 3486 : AAArgumentFromCallSiteArguments<AADereferenceable, 3487 AADereferenceableImpl> { 3488 using Base = 3489 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 3490 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 3491 : Base(IRP, A) {} 3492 3493 /// See AbstractAttribute::trackStatistics() 3494 void trackStatistics() const override { 3495 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 3496 } 3497 }; 3498 3499 /// Dereferenceable attribute for a call site argument. 3500 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 3501 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 3502 : AADereferenceableFloating(IRP, A) {} 3503 3504 /// See AbstractAttribute::trackStatistics() 3505 void trackStatistics() const override { 3506 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 3507 } 3508 }; 3509 3510 /// Dereferenceable attribute deduction for a call site return value. 3511 struct AADereferenceableCallSiteReturned final 3512 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 3513 using Base = 3514 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 3515 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 3516 : Base(IRP, A) {} 3517 3518 /// See AbstractAttribute::trackStatistics() 3519 void trackStatistics() const override { 3520 STATS_DECLTRACK_CS_ATTR(dereferenceable); 3521 } 3522 }; 3523 3524 // ------------------------ Align Argument Attribute ------------------------ 3525 3526 /// \p Ptr is accessed so we can get alignment information if the ABI requires 3527 /// the element type to be aligned. 3528 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr, 3529 const DataLayout &DL) { 3530 MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL); 3531 Type *ElementTy = Ptr->getType()->getPointerElementType(); 3532 if (ElementTy->isSized()) 3533 KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy)); 3534 return KnownAlignment; 3535 } 3536 3537 static unsigned getKnownAlignForUse(Attributor &A, 3538 AbstractAttribute &QueryingAA, 3539 Value &AssociatedValue, const Use *U, 3540 const Instruction *I, bool &TrackUse) { 3541 // We need to follow common pointer manipulation uses to the accesses they 3542 // feed into. 3543 if (isa<CastInst>(I)) { 3544 // Follow all but ptr2int casts. 
3545 TrackUse = !isa<PtrToIntInst>(I); 3546 return 0; 3547 } 3548 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3549 if (GEP->hasAllConstantIndices()) { 3550 TrackUse = true; 3551 return 0; 3552 } 3553 } 3554 3555 MaybeAlign MA; 3556 if (const auto *CB = dyn_cast<CallBase>(I)) { 3557 if (CB->isBundleOperand(U) || CB->isCallee(U)) 3558 return 0; 3559 3560 unsigned ArgNo = CB->getArgOperandNo(U); 3561 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 3562 // As long as we only use known information there is no need to track 3563 // dependences here. 3564 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, 3565 /* TrackDependence */ false); 3566 MA = MaybeAlign(AlignAA.getKnownAlign()); 3567 } 3568 3569 const DataLayout &DL = A.getDataLayout(); 3570 const Value *UseV = U->get(); 3571 if (auto *SI = dyn_cast<StoreInst>(I)) { 3572 if (SI->getPointerOperand() == UseV) { 3573 if (unsigned SIAlign = SI->getAlignment()) 3574 MA = MaybeAlign(SIAlign); 3575 else 3576 MA = getKnownAlignmentFromAccessedPtr(UseV, DL); 3577 } 3578 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 3579 if (LI->getPointerOperand() == UseV) { 3580 if (unsigned LIAlign = LI->getAlignment()) 3581 MA = MaybeAlign(LIAlign); 3582 else 3583 MA = getKnownAlignmentFromAccessedPtr(UseV, DL); 3584 } 3585 } 3586 3587 if (!MA.hasValue() || MA <= 1) 3588 return 0; 3589 3590 unsigned Alignment = MA->value(); 3591 int64_t Offset; 3592 3593 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 3594 if (Base == &AssociatedValue) { 3595 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 3596 // So we can say that the maximum power of two which is a divisor of 3597 // gcd(Offset, Alignment) is an alignment. 3598 3599 uint32_t gcd = 3600 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 3601 Alignment = llvm::PowerOf2Floor(gcd); 3602 } 3603 } 3604 3605 return Alignment; 3606 } 3607 3608 struct AAAlignImpl : AAAlign { 3609 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 3610 3611 /// See AbstractAttribute::initialize(...). 3612 void initialize(Attributor &A) override { 3613 SmallVector<Attribute, 4> Attrs; 3614 getAttrs({Attribute::Alignment}, Attrs); 3615 for (const Attribute &Attr : Attrs) 3616 takeKnownMaximum(Attr.getValueAsInt()); 3617 3618 Value &V = getAssociatedValue(); 3619 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 3620 // use of the function pointer. This was caused by D73131. We want to 3621 // avoid this for function pointers especially because we iterate 3622 // their uses and int2ptr is not handled. It is not a correctness 3623 // problem though! 3624 if (!V.getType()->getPointerElementType()->isFunctionTy()) 3625 takeKnownMaximum( 3626 V.getPointerAlignment(A.getDataLayout()).valueOrOne().value()); 3627 3628 if (getIRPosition().isFnInterfaceKind() && 3629 (!getAnchorScope() || 3630 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 3631 indicatePessimisticFixpoint(); 3632 return; 3633 } 3634 3635 if (Instruction *CtxI = getCtxI()) 3636 followUsesInMBEC(*this, A, getState(), *CtxI); 3637 } 3638 3639 /// See AbstractAttribute::manifest(...). 3640 ChangeStatus manifest(Attributor &A) override { 3641 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 3642 3643 // Check for users that allow alignment annotations. 
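    // E.g., a load or store that uses the associated value as its pointer
    // operand can carry the deduced alignment directly:
    //   store i32 0, i32* %p, align 16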
3644 Value &AssociatedValue = getAssociatedValue(); 3645 for (const Use &U : AssociatedValue.uses()) { 3646 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 3647 if (SI->getPointerOperand() == &AssociatedValue) 3648 if (SI->getAlignment() < getAssumedAlign()) { 3649 STATS_DECLTRACK(AAAlign, Store, 3650 "Number of times alignment added to a store"); 3651 SI->setAlignment(Align(getAssumedAlign())); 3652 LoadStoreChanged = ChangeStatus::CHANGED; 3653 } 3654 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 3655 if (LI->getPointerOperand() == &AssociatedValue) 3656 if (LI->getAlignment() < getAssumedAlign()) { 3657 LI->setAlignment(Align(getAssumedAlign())); 3658 STATS_DECLTRACK(AAAlign, Load, 3659 "Number of times alignment added to a load"); 3660 LoadStoreChanged = ChangeStatus::CHANGED; 3661 } 3662 } 3663 } 3664 3665 ChangeStatus Changed = AAAlign::manifest(A); 3666 3667 MaybeAlign InheritAlign = 3668 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3669 if (InheritAlign.valueOrOne() >= getAssumedAlign()) 3670 return LoadStoreChanged; 3671 return Changed | LoadStoreChanged; 3672 } 3673 3674 // TODO: Provide a helper to determine the implied ABI alignment and check in 3675 // the existing manifest method and a new one for AAAlignImpl that value 3676 // to avoid making the alignment explicit if it did not improve. 3677 3678 /// See AbstractAttribute::getDeducedAttributes 3679 virtual void 3680 getDeducedAttributes(LLVMContext &Ctx, 3681 SmallVectorImpl<Attribute> &Attrs) const override { 3682 if (getAssumedAlign() > 1) 3683 Attrs.emplace_back( 3684 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 3685 } 3686 3687 /// See followUsesInMBEC 3688 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3689 AAAlign::StateType &State) { 3690 bool TrackUse = false; 3691 3692 unsigned int KnownAlign = 3693 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 3694 State.takeKnownMaximum(KnownAlign); 3695 3696 return TrackUse; 3697 } 3698 3699 /// See AbstractAttribute::getAsStr(). 3700 const std::string getAsStr() const override { 3701 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 3702 "-" + std::to_string(getAssumedAlign()) + ">") 3703 : "unknown-align"; 3704 } 3705 }; 3706 3707 /// Align attribute for a floating value. 3708 struct AAAlignFloating : AAAlignImpl { 3709 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 3710 3711 /// See AbstractAttribute::updateImpl(...). 3712 ChangeStatus updateImpl(Attributor &A) override { 3713 const DataLayout &DL = A.getDataLayout(); 3714 3715 auto VisitValueCB = [&](Value &V, const Instruction *, 3716 AAAlign::StateType &T, bool Stripped) -> bool { 3717 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); 3718 if (!Stripped && this == &AA) { 3719 // Use only IR information if we did not strip anything. 3720 const MaybeAlign PA = V.getPointerAlignment(DL); 3721 T.takeKnownMaximum(PA ? PA->value() : 0); 3722 T.indicatePessimisticFixpoint(); 3723 } else { 3724 // Use abstract attribute information. 
3725 const AAAlign::StateType &DS = 3726 static_cast<const AAAlign::StateType &>(AA.getState()); 3727 T ^= DS; 3728 } 3729 return T.isValidState(); 3730 }; 3731 3732 StateType T; 3733 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, 3734 VisitValueCB, getCtxI())) 3735 return indicatePessimisticFixpoint(); 3736 3737 // TODO: If we know we visited all incoming values, thus no are assumed 3738 // dead, we can take the known information from the state T. 3739 return clampStateAndIndicateChange(getState(), T); 3740 } 3741 3742 /// See AbstractAttribute::trackStatistics() 3743 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 3744 }; 3745 3746 /// Align attribute for function return value. 3747 struct AAAlignReturned final 3748 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 3749 AAAlignReturned(const IRPosition &IRP, Attributor &A) 3750 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {} 3751 3752 /// See AbstractAttribute::trackStatistics() 3753 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 3754 }; 3755 3756 /// Align attribute for function argument. 3757 struct AAAlignArgument final 3758 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 3759 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 3760 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3761 3762 /// See AbstractAttribute::manifest(...). 3763 ChangeStatus manifest(Attributor &A) override { 3764 // If the associated argument is involved in a must-tail call we give up 3765 // because we would need to keep the argument alignments of caller and 3766 // callee in-sync. Just does not seem worth the trouble right now. 3767 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 3768 return ChangeStatus::UNCHANGED; 3769 return Base::manifest(A); 3770 } 3771 3772 /// See AbstractAttribute::trackStatistics() 3773 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 3774 }; 3775 3776 struct AAAlignCallSiteArgument final : AAAlignFloating { 3777 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 3778 : AAAlignFloating(IRP, A) {} 3779 3780 /// See AbstractAttribute::manifest(...). 3781 ChangeStatus manifest(Attributor &A) override { 3782 // If the associated argument is involved in a must-tail call we give up 3783 // because we would need to keep the argument alignments of caller and 3784 // callee in-sync. Just does not seem worth the trouble right now. 3785 if (Argument *Arg = getAssociatedArgument()) 3786 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 3787 return ChangeStatus::UNCHANGED; 3788 ChangeStatus Changed = AAAlignImpl::manifest(A); 3789 MaybeAlign InheritAlign = 3790 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3791 if (InheritAlign.valueOrOne() >= getAssumedAlign()) 3792 Changed = ChangeStatus::UNCHANGED; 3793 return Changed; 3794 } 3795 3796 /// See AbstractAttribute::updateImpl(Attributor &A). 3797 ChangeStatus updateImpl(Attributor &A) override { 3798 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 3799 if (Argument *Arg = getAssociatedArgument()) { 3800 // We only take known information from the argument 3801 // so we do not need to track a dependence. 
3802 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 3803 *this, IRPosition::argument(*Arg), /* TrackDependence */ false); 3804 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 3805 } 3806 return Changed; 3807 } 3808 3809 /// See AbstractAttribute::trackStatistics() 3810 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 3811 }; 3812 3813 /// Align attribute deduction for a call site return value. 3814 struct AAAlignCallSiteReturned final 3815 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 3816 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 3817 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 3818 : Base(IRP, A) {} 3819 3820 /// See AbstractAttribute::initialize(...). 3821 void initialize(Attributor &A) override { 3822 Base::initialize(A); 3823 Function *F = getAssociatedFunction(); 3824 if (!F) 3825 indicatePessimisticFixpoint(); 3826 } 3827 3828 /// See AbstractAttribute::trackStatistics() 3829 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 3830 }; 3831 3832 /// ------------------ Function No-Return Attribute ---------------------------- 3833 struct AANoReturnImpl : public AANoReturn { 3834 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 3835 3836 /// See AbstractAttribute::initialize(...). 3837 void initialize(Attributor &A) override { 3838 AANoReturn::initialize(A); 3839 Function *F = getAssociatedFunction(); 3840 if (!F) 3841 indicatePessimisticFixpoint(); 3842 } 3843 3844 /// See AbstractAttribute::getAsStr(). 3845 const std::string getAsStr() const override { 3846 return getAssumed() ? "noreturn" : "may-return"; 3847 } 3848 3849 /// See AbstractAttribute::updateImpl(Attributor &A). 3850 virtual ChangeStatus updateImpl(Attributor &A) override { 3851 auto CheckForNoReturn = [](Instruction &) { return false; }; 3852 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 3853 {(unsigned)Instruction::Ret})) 3854 return indicatePessimisticFixpoint(); 3855 return ChangeStatus::UNCHANGED; 3856 } 3857 }; 3858 3859 struct AANoReturnFunction final : AANoReturnImpl { 3860 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 3861 : AANoReturnImpl(IRP, A) {} 3862 3863 /// See AbstractAttribute::trackStatistics() 3864 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 3865 }; 3866 3867 /// NoReturn attribute deduction for a call sites. 3868 struct AANoReturnCallSite final : AANoReturnImpl { 3869 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 3870 : AANoReturnImpl(IRP, A) {} 3871 3872 /// See AbstractAttribute::updateImpl(...). 3873 ChangeStatus updateImpl(Attributor &A) override { 3874 // TODO: Once we have call site specific value information we can provide 3875 // call site specific liveness information and then it makes 3876 // sense to specialize attributes for call sites arguments instead of 3877 // redirecting requests to the callee argument. 3878 Function *F = getAssociatedFunction(); 3879 const IRPosition &FnPos = IRPosition::function(*F); 3880 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos); 3881 return clampStateAndIndicateChange( 3882 getState(), 3883 static_cast<const AANoReturn::StateType &>(FnAA.getState())); 3884 } 3885 3886 /// See AbstractAttribute::trackStatistics() 3887 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } 3888 }; 3889 3890 /// ----------------------- Variable Capturing --------------------------------- 3891 3892 /// A class to hold the state of for no-capture attributes. 
3893 struct AANoCaptureImpl : public AANoCapture { 3894 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} 3895 3896 /// See AbstractAttribute::initialize(...). 3897 void initialize(Attributor &A) override { 3898 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) { 3899 indicateOptimisticFixpoint(); 3900 return; 3901 } 3902 Function *AnchorScope = getAnchorScope(); 3903 if (isFnInterfaceKind() && 3904 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) { 3905 indicatePessimisticFixpoint(); 3906 return; 3907 } 3908 3909 // You cannot "capture" null in the default address space. 3910 if (isa<ConstantPointerNull>(getAssociatedValue()) && 3911 getAssociatedValue().getType()->getPointerAddressSpace() == 0) { 3912 indicateOptimisticFixpoint(); 3913 return; 3914 } 3915 3916 const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope; 3917 3918 // Check what state the associated function can actually capture. 3919 if (F) 3920 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 3921 else 3922 indicatePessimisticFixpoint(); 3923 } 3924 3925 /// See AbstractAttribute::updateImpl(...). 3926 ChangeStatus updateImpl(Attributor &A) override; 3927 3928 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...). 3929 virtual void 3930 getDeducedAttributes(LLVMContext &Ctx, 3931 SmallVectorImpl<Attribute> &Attrs) const override { 3932 if (!isAssumedNoCaptureMaybeReturned()) 3933 return; 3934 3935 if (getArgNo() >= 0) { 3936 if (isAssumedNoCapture()) 3937 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture)); 3938 else if (ManifestInternal) 3939 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned")); 3940 } 3941 } 3942 3943 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known 3944 /// depending on the ability of the function associated with \p IRP to capture 3945 /// state in memory and through "returning/throwing", respectively. 3946 static void determineFunctionCaptureCapabilities(const IRPosition &IRP, 3947 const Function &F, 3948 BitIntegerState &State) { 3949 // TODO: Once we have memory behavior attributes we should use them here. 3950 3951 // If we know we cannot communicate or write to memory, we do not care about 3952 // ptr2int anymore. 3953 if (F.onlyReadsMemory() && F.doesNotThrow() && 3954 F.getReturnType()->isVoidTy()) { 3955 State.addKnownBits(NO_CAPTURE); 3956 return; 3957 } 3958 3959 // A function cannot capture state in memory if it only reads memory, it can 3960 // however return/throw state and the state might be influenced by the 3961 // pointer value, e.g., loading from a returned pointer might reveal a bit. 3962 if (F.onlyReadsMemory()) 3963 State.addKnownBits(NOT_CAPTURED_IN_MEM); 3964 3965 // A function cannot communicate state back if it does not through 3966 // exceptions and doesn not return values. 3967 if (F.doesNotThrow() && F.getReturnType()->isVoidTy()) 3968 State.addKnownBits(NOT_CAPTURED_IN_RET); 3969 3970 // Check existing "returned" attributes. 3971 int ArgNo = IRP.getArgNo(); 3972 if (F.doesNotThrow() && ArgNo >= 0) { 3973 for (unsigned u = 0, e = F.arg_size(); u < e; ++u) 3974 if (F.hasParamAttribute(u, Attribute::Returned)) { 3975 if (u == unsigned(ArgNo)) 3976 State.removeAssumedBits(NOT_CAPTURED_IN_RET); 3977 else if (F.onlyReadsMemory()) 3978 State.addKnownBits(NO_CAPTURE); 3979 else 3980 State.addKnownBits(NOT_CAPTURED_IN_RET); 3981 break; 3982 } 3983 } 3984 } 3985 3986 /// See AbstractState::getAsStr(). 
3987 const std::string getAsStr() const override { 3988 if (isKnownNoCapture()) 3989 return "known not-captured"; 3990 if (isAssumedNoCapture()) 3991 return "assumed not-captured"; 3992 if (isKnownNoCaptureMaybeReturned()) 3993 return "known not-captured-maybe-returned"; 3994 if (isAssumedNoCaptureMaybeReturned()) 3995 return "assumed not-captured-maybe-returned"; 3996 return "assumed-captured"; 3997 } 3998 }; 3999 4000 /// Attributor-aware capture tracker. 4001 struct AACaptureUseTracker final : public CaptureTracker { 4002 4003 /// Create a capture tracker that can lookup in-flight abstract attributes 4004 /// through the Attributor \p A. 4005 /// 4006 /// If a use leads to a potential capture, \p CapturedInMemory is set and the 4007 /// search is stopped. If a use leads to a return instruction, 4008 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed. 4009 /// If a use leads to a ptr2int which may capture the value, 4010 /// \p CapturedInInteger is set. If a use is found that is currently assumed 4011 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies 4012 /// set. All values in \p PotentialCopies are later tracked as well. For every 4013 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0, 4014 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger 4015 /// conservatively set to true. 4016 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA, 4017 const AAIsDead &IsDeadAA, AANoCapture::StateType &State, 4018 SmallVectorImpl<const Value *> &PotentialCopies, 4019 unsigned &RemainingUsesToExplore) 4020 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State), 4021 PotentialCopies(PotentialCopies), 4022 RemainingUsesToExplore(RemainingUsesToExplore) {} 4023 4024 /// Determine if \p V maybe captured. *Also updates the state!* 4025 bool valueMayBeCaptured(const Value *V) { 4026 if (V->getType()->isPointerTy()) { 4027 PointerMayBeCaptured(V, this); 4028 } else { 4029 State.indicatePessimisticFixpoint(); 4030 } 4031 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4032 } 4033 4034 /// See CaptureTracker::tooManyUses(). 4035 void tooManyUses() override { 4036 State.removeAssumedBits(AANoCapture::NO_CAPTURE); 4037 } 4038 4039 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override { 4040 if (CaptureTracker::isDereferenceableOrNull(O, DL)) 4041 return true; 4042 const auto &DerefAA = A.getAAFor<AADereferenceable>( 4043 NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true, 4044 DepClassTy::OPTIONAL); 4045 return DerefAA.getAssumedDereferenceableBytes(); 4046 } 4047 4048 /// See CaptureTracker::captured(...). 4049 bool captured(const Use *U) override { 4050 Instruction *UInst = cast<Instruction>(U->getUser()); 4051 LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst 4052 << "\n"); 4053 4054 // Because we may reuse the tracker multiple times we keep track of the 4055 // number of explored uses ourselves as well. 4056 if (RemainingUsesToExplore-- == 0) { 4057 LLVM_DEBUG(dbgs() << " - too many uses to explore!\n"); 4058 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4059 /* Return */ true); 4060 } 4061 4062 // Deal with ptr2int by following uses. 4063 if (isa<PtrToIntInst>(UInst)) { 4064 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n"); 4065 return valueMayBeCaptured(UInst); 4066 } 4067 4068 // Explicitly catch return instructions. 
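    // A return communicates the pointer back to the caller but does not place
    // it in memory or in an integer, so only the "return" bit is cleared here.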
4069 if (isa<ReturnInst>(UInst)) 4070 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4071 /* Return */ true); 4072 4073 // For now we only use special logic for call sites. However, the tracker 4074 // itself knows about a lot of other non-capturing cases already. 4075 auto *CB = dyn_cast<CallBase>(UInst); 4076 if (!CB || !CB->isArgOperand(U)) 4077 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4078 /* Return */ true); 4079 4080 unsigned ArgNo = CB->getArgOperandNo(U); 4081 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); 4082 // If we have a abstract no-capture attribute for the argument we can use 4083 // it to justify a non-capture attribute here. This allows recursion! 4084 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); 4085 if (ArgNoCaptureAA.isAssumedNoCapture()) 4086 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4087 /* Return */ false); 4088 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4089 addPotentialCopy(*CB); 4090 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4091 /* Return */ false); 4092 } 4093 4094 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4095 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4096 /* Return */ true); 4097 } 4098 4099 /// Register \p CS as potential copy of the value we are checking. 4100 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } 4101 4102 /// See CaptureTracker::shouldExplore(...). 4103 bool shouldExplore(const Use *U) override { 4104 // Check liveness and ignore droppable users. 4105 return !U->getUser()->isDroppable() && 4106 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); 4107 } 4108 4109 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4110 /// \p CapturedInRet, then return the appropriate value for use in the 4111 /// CaptureTracker::captured() interface. 4112 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4113 bool CapturedInRet) { 4114 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4115 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4116 if (CapturedInMem) 4117 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4118 if (CapturedInInt) 4119 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4120 if (CapturedInRet) 4121 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4122 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4123 } 4124 4125 private: 4126 /// The attributor providing in-flight abstract attributes. 4127 Attributor &A; 4128 4129 /// The abstract attribute currently updated. 4130 AANoCapture &NoCaptureAA; 4131 4132 /// The abstract liveness state. 4133 const AAIsDead &IsDeadAA; 4134 4135 /// The state currently updated. 4136 AANoCapture::StateType &State; 4137 4138 /// Set of potential copies of the tracked value. 4139 SmallVectorImpl<const Value *> &PotentialCopies; 4140 4141 /// Global counter to limit the number of explored uses. 4142 unsigned &RemainingUsesToExplore; 4143 }; 4144 4145 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4146 const IRPosition &IRP = getIRPosition(); 4147 const Value *V = 4148 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue(); 4149 if (!V) 4150 return indicatePessimisticFixpoint(); 4151 4152 const Function *F = 4153 getArgNo() >= 0 ? 
IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4154 assert(F && "Expected a function!"); 4155 const IRPosition &FnPos = IRPosition::function(*F); 4156 const auto &IsDeadAA = 4157 A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); 4158 4159 AANoCapture::StateType T; 4160 4161 // Readonly means we cannot capture through memory. 4162 const auto &FnMemAA = 4163 A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); 4164 if (FnMemAA.isAssumedReadOnly()) { 4165 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4166 if (FnMemAA.isKnownReadOnly()) 4167 addKnownBits(NOT_CAPTURED_IN_MEM); 4168 else 4169 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4170 } 4171 4172 // Make sure all returned values are different than the underlying value. 4173 // TODO: we could do this in a more sophisticated way inside 4174 // AAReturnedValues, e.g., track all values that escape through returns 4175 // directly somehow. 4176 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4177 bool SeenConstant = false; 4178 for (auto &It : RVAA.returned_values()) { 4179 if (isa<Constant>(It.first)) { 4180 if (SeenConstant) 4181 return false; 4182 SeenConstant = true; 4183 } else if (!isa<Argument>(It.first) || 4184 It.first == getAssociatedArgument()) 4185 return false; 4186 } 4187 return true; 4188 }; 4189 4190 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 4191 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 4192 if (NoUnwindAA.isAssumedNoUnwind()) { 4193 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4194 const AAReturnedValues *RVAA = 4195 IsVoidTy ? nullptr 4196 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4197 /* TrackDependence */ true, 4198 DepClassTy::OPTIONAL); 4199 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4200 T.addKnownBits(NOT_CAPTURED_IN_RET); 4201 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4202 return ChangeStatus::UNCHANGED; 4203 if (NoUnwindAA.isKnownNoUnwind() && 4204 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4205 addKnownBits(NOT_CAPTURED_IN_RET); 4206 if (isKnown(NOT_CAPTURED_IN_MEM)) 4207 return indicateOptimisticFixpoint(); 4208 } 4209 } 4210 } 4211 4212 // Use the CaptureTracker interface and logic with the specialized tracker, 4213 // defined in AACaptureUseTracker, that can look at in-flight abstract 4214 // attributes and directly updates the assumed state. 4215 SmallVector<const Value *, 4> PotentialCopies; 4216 unsigned RemainingUsesToExplore = 4217 getDefaultMaxUsesToExploreForCaptureTracking(); 4218 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4219 RemainingUsesToExplore); 4220 4221 // Check all potential copies of the associated value until we can assume 4222 // none will be captured or we have to assume at least one might be. 4223 unsigned Idx = 0; 4224 PotentialCopies.push_back(V); 4225 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4226 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4227 4228 AANoCapture::StateType &S = getState(); 4229 auto Assumed = S.getAssumed(); 4230 S.intersectAssumedBits(T.getAssumed()); 4231 if (!isAssumedNoCaptureMaybeReturned()) 4232 return indicatePessimisticFixpoint(); 4233 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 4234 : ChangeStatus::CHANGED; 4235 } 4236 4237 /// NoCapture attribute for function arguments. 
4238 struct AANoCaptureArgument final : AANoCaptureImpl { 4239 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 4240 : AANoCaptureImpl(IRP, A) {} 4241 4242 /// See AbstractAttribute::trackStatistics() 4243 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 4244 }; 4245 4246 /// NoCapture attribute for call site arguments. 4247 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 4248 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 4249 : AANoCaptureImpl(IRP, A) {} 4250 4251 /// See AbstractAttribute::initialize(...). 4252 void initialize(Attributor &A) override { 4253 if (Argument *Arg = getAssociatedArgument()) 4254 if (Arg->hasByValAttr()) 4255 indicateOptimisticFixpoint(); 4256 AANoCaptureImpl::initialize(A); 4257 } 4258 4259 /// See AbstractAttribute::updateImpl(...). 4260 ChangeStatus updateImpl(Attributor &A) override { 4261 // TODO: Once we have call site specific value information we can provide 4262 // call site specific liveness information and then it makes 4263 // sense to specialize attributes for call sites arguments instead of 4264 // redirecting requests to the callee argument. 4265 Argument *Arg = getAssociatedArgument(); 4266 if (!Arg) 4267 return indicatePessimisticFixpoint(); 4268 const IRPosition &ArgPos = IRPosition::argument(*Arg); 4269 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); 4270 return clampStateAndIndicateChange( 4271 getState(), 4272 static_cast<const AANoCapture::StateType &>(ArgAA.getState())); 4273 } 4274 4275 /// See AbstractAttribute::trackStatistics() 4276 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 4277 }; 4278 4279 /// NoCapture attribute for floating values. 4280 struct AANoCaptureFloating final : AANoCaptureImpl { 4281 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 4282 : AANoCaptureImpl(IRP, A) {} 4283 4284 /// See AbstractAttribute::trackStatistics() 4285 void trackStatistics() const override { 4286 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 4287 } 4288 }; 4289 4290 /// NoCapture attribute for function return value. 4291 struct AANoCaptureReturned final : AANoCaptureImpl { 4292 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 4293 : AANoCaptureImpl(IRP, A) { 4294 llvm_unreachable("NoCapture is not applicable to function returns!"); 4295 } 4296 4297 /// See AbstractAttribute::initialize(...). 4298 void initialize(Attributor &A) override { 4299 llvm_unreachable("NoCapture is not applicable to function returns!"); 4300 } 4301 4302 /// See AbstractAttribute::updateImpl(...). 4303 ChangeStatus updateImpl(Attributor &A) override { 4304 llvm_unreachable("NoCapture is not applicable to function returns!"); 4305 } 4306 4307 /// See AbstractAttribute::trackStatistics() 4308 void trackStatistics() const override {} 4309 }; 4310 4311 /// NoCapture attribute deduction for a call site return value. 4312 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 4313 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 4314 : AANoCaptureImpl(IRP, A) {} 4315 4316 /// See AbstractAttribute::trackStatistics() 4317 void trackStatistics() const override { 4318 STATS_DECLTRACK_CSRET_ATTR(nocapture) 4319 } 4320 }; 4321 4322 /// ------------------ Value Simplify Attribute ---------------------------- 4323 struct AAValueSimplifyImpl : AAValueSimplify { 4324 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 4325 : AAValueSimplify(IRP, A) {} 4326 4327 /// See AbstractAttribute::initialize(...). 
4328 void initialize(Attributor &A) override { 4329 if (getAssociatedValue().getType()->isVoidTy()) 4330 indicatePessimisticFixpoint(); 4331 } 4332 4333 /// See AbstractAttribute::getAsStr(). 4334 const std::string getAsStr() const override { 4335 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") 4336 : "not-simple"; 4337 } 4338 4339 /// See AbstractAttribute::trackStatistics() 4340 void trackStatistics() const override {} 4341 4342 /// See AAValueSimplify::getAssumedSimplifiedValue() 4343 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 4344 if (!getAssumed()) 4345 return const_cast<Value *>(&getAssociatedValue()); 4346 return SimplifiedAssociatedValue; 4347 } 4348 4349 /// Helper function for querying AAValueSimplify and updating candicate. 4350 /// \param QueryingValue Value trying to unify with SimplifiedValue 4351 /// \param AccumulatedSimplifiedValue Current simplification result. 4352 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 4353 Value &QueryingValue, 4354 Optional<Value *> &AccumulatedSimplifiedValue) { 4355 // FIXME: Add a typecast support. 4356 4357 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( 4358 QueryingAA, IRPosition::value(QueryingValue)); 4359 4360 Optional<Value *> QueryingValueSimplified = 4361 ValueSimplifyAA.getAssumedSimplifiedValue(A); 4362 4363 if (!QueryingValueSimplified.hasValue()) 4364 return true; 4365 4366 if (!QueryingValueSimplified.getValue()) 4367 return false; 4368 4369 Value &QueryingValueSimplifiedUnwrapped = 4370 *QueryingValueSimplified.getValue(); 4371 4372 if (AccumulatedSimplifiedValue.hasValue() && 4373 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && 4374 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4375 return AccumulatedSimplifiedValue == QueryingValueSimplified; 4376 if (AccumulatedSimplifiedValue.hasValue() && 4377 isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4378 return true; 4379 4380 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue 4381 << " is assumed to be " 4382 << QueryingValueSimplifiedUnwrapped << "\n"); 4383 4384 AccumulatedSimplifiedValue = QueryingValueSimplified; 4385 return true; 4386 } 4387 4388 bool askSimplifiedValueForAAValueConstantRange(Attributor &A) { 4389 if (!getAssociatedValue().getType()->isIntegerTy()) 4390 return false; 4391 4392 const auto &ValueConstantRangeAA = 4393 A.getAAFor<AAValueConstantRange>(*this, getIRPosition()); 4394 4395 Optional<ConstantInt *> COpt = 4396 ValueConstantRangeAA.getAssumedConstantInt(A); 4397 if (COpt.hasValue()) { 4398 if (auto *C = COpt.getValue()) 4399 SimplifiedAssociatedValue = C; 4400 else 4401 return false; 4402 } else { 4403 SimplifiedAssociatedValue = llvm::None; 4404 } 4405 return true; 4406 } 4407 4408 /// See AbstractAttribute::manifest(...). 4409 ChangeStatus manifest(Attributor &A) override { 4410 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4411 4412 if (SimplifiedAssociatedValue.hasValue() && 4413 !SimplifiedAssociatedValue.getValue()) 4414 return Changed; 4415 4416 Value &V = getAssociatedValue(); 4417 auto *C = SimplifiedAssociatedValue.hasValue() 4418 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4419 : UndefValue::get(V.getType()); 4420 if (C) { 4421 // We can replace the AssociatedValue with the constant. 
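      // Only rewrite if there are users, the value is not already the
      // constant, and the types match; we do not introduce casts here.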
      if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
        LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
                          << " :: " << *this << "\n");
        if (A.changeValueAfterManifest(V, *C))
          Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
    SimplifiedAssociatedValue = &getAssociatedValue();
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAnchorScope() || getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();

    // FIXME: This is a hack to prevent us from propagating function pointers
    //        in the new pass manager CGSCC pass as it creates call edges the
    //        CallGraphUpdater cannot handle yet.
    Value &V = getAssociatedValue();
    if (V.getType()->isPointerTy() &&
        V.getType()->getPointerElementType()->isFunctionTy() &&
        !A.isModulePass())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
      if (!MemAA.isAssumedReadOnly())
        return indicatePessimisticFixpoint();
    }

    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // We can only propagate thread independent values through callbacks.
      // This is different to direct/indirect call sites because for them we
      // know the thread executing the caller and callee is the same. For
      // callbacks this is not guaranteed, thus a thread dependent value could
      // be different for the caller and callee, making it invalid to
      // propagate.
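      // (A constant that references a thread_local global is one example of
      //  such a thread dependent value.)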
4500 Value &ArgOp = ACSArgPos.getAssociatedValue(); 4501 if (ACS.isCallbackCall()) 4502 if (auto *C = dyn_cast<Constant>(&ArgOp)) 4503 if (C->isThreadDependent()) 4504 return false; 4505 return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue); 4506 }; 4507 4508 bool AllCallSitesKnown; 4509 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 4510 AllCallSitesKnown)) 4511 if (!askSimplifiedValueForAAValueConstantRange(A)) 4512 return indicatePessimisticFixpoint(); 4513 4514 // If a candicate was found in this update, return CHANGED. 4515 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4516 ? ChangeStatus::UNCHANGED 4517 : ChangeStatus ::CHANGED; 4518 } 4519 4520 /// See AbstractAttribute::trackStatistics() 4521 void trackStatistics() const override { 4522 STATS_DECLTRACK_ARG_ATTR(value_simplify) 4523 } 4524 }; 4525 4526 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 4527 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 4528 : AAValueSimplifyImpl(IRP, A) {} 4529 4530 /// See AbstractAttribute::updateImpl(...). 4531 ChangeStatus updateImpl(Attributor &A) override { 4532 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4533 4534 auto PredForReturned = [&](Value &V) { 4535 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4536 }; 4537 4538 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 4539 if (!askSimplifiedValueForAAValueConstantRange(A)) 4540 return indicatePessimisticFixpoint(); 4541 4542 // If a candicate was found in this update, return CHANGED. 4543 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4544 ? ChangeStatus::UNCHANGED 4545 : ChangeStatus ::CHANGED; 4546 } 4547 4548 ChangeStatus manifest(Attributor &A) override { 4549 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4550 4551 if (SimplifiedAssociatedValue.hasValue() && 4552 !SimplifiedAssociatedValue.getValue()) 4553 return Changed; 4554 4555 Value &V = getAssociatedValue(); 4556 auto *C = SimplifiedAssociatedValue.hasValue() 4557 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4558 : UndefValue::get(V.getType()); 4559 if (C) { 4560 auto PredForReturned = 4561 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 4562 // We can replace the AssociatedValue with the constant. 4563 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) 4564 return true; 4565 4566 for (ReturnInst *RI : RetInsts) { 4567 if (RI->getFunction() != getAnchorScope()) 4568 continue; 4569 auto *RC = C; 4570 if (RC->getType() != RI->getReturnValue()->getType()) 4571 RC = ConstantExpr::getBitCast(RC, 4572 RI->getReturnValue()->getType()); 4573 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC 4574 << " in " << *RI << " :: " << *this << "\n"); 4575 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) 4576 Changed = ChangeStatus::CHANGED; 4577 } 4578 return true; 4579 }; 4580 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 4581 } 4582 4583 return Changed | AAValueSimplify::manifest(A); 4584 } 4585 4586 /// See AbstractAttribute::trackStatistics() 4587 void trackStatistics() const override { 4588 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 4589 } 4590 }; 4591 4592 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 4593 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 4594 : AAValueSimplifyImpl(IRP, A) {} 4595 4596 /// See AbstractAttribute::initialize(...). 4597 void initialize(Attributor &A) override { 4598 // FIXME: This might have exposed a SCC iterator update bug in the old PM. 
4599 // Needs investigation. 4600 // AAValueSimplifyImpl::initialize(A); 4601 Value &V = getAnchorValue(); 4602 4603 // TODO: add other cases 4604 if (isa<Constant>(V)) 4605 indicatePessimisticFixpoint(); 4606 } 4607 4608 /// See AbstractAttribute::updateImpl(...). 4609 ChangeStatus updateImpl(Attributor &A) override { 4610 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4611 4612 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, 4613 bool Stripped) -> bool { 4614 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V)); 4615 if (!Stripped && this == &AA) { 4616 // TODO: Look at the instruction and check recursively. 4617 4618 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V 4619 << "\n"); 4620 return false; 4621 } 4622 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4623 }; 4624 4625 bool Dummy = false; 4626 if (!genericValueTraversal<AAValueSimplify, bool>( 4627 A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(), 4628 /* UseValueSimplify */ false)) 4629 if (!askSimplifiedValueForAAValueConstantRange(A)) 4630 return indicatePessimisticFixpoint(); 4631 4632 // If a candidate was found in this update, return CHANGED. 4633 4634 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4635 ? ChangeStatus::UNCHANGED 4636 : ChangeStatus::CHANGED; 4637 } 4638 4639 /// See AbstractAttribute::trackStatistics() 4640 void trackStatistics() const override { 4641 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 4642 } 4643 }; 4644 4645 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 4646 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 4647 : AAValueSimplifyImpl(IRP, A) {} 4648 4649 /// See AbstractAttribute::initialize(...). 4650 void initialize(Attributor &A) override { 4651 SimplifiedAssociatedValue = &getAnchorValue(); 4652 indicateOptimisticFixpoint(); 4653 } 4654 /// See AbstractAttribute::updateImpl(...). 4655 ChangeStatus updateImpl(Attributor &A) override { 4656 llvm_unreachable( 4657 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 4658 } 4659 /// See AbstractAttribute::trackStatistics() 4660 void trackStatistics() const override { 4661 STATS_DECLTRACK_FN_ATTR(value_simplify) 4662 } 4663 }; 4664 4665 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 4666 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 4667 : AAValueSimplifyFunction(IRP, A) {} 4668 /// See AbstractAttribute::trackStatistics() 4669 void trackStatistics() const override { 4670 STATS_DECLTRACK_CS_ATTR(value_simplify) 4671 } 4672 }; 4673 4674 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned { 4675 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 4676 : AAValueSimplifyReturned(IRP, A) {} 4677 4678 /// See AbstractAttribute::manifest(...).
4679 ChangeStatus manifest(Attributor &A) override { 4680 return AAValueSimplifyImpl::manifest(A); 4681 } 4682 4683 void trackStatistics() const override { 4684 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 4685 } 4686 }; 4687 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 4688 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 4689 : AAValueSimplifyFloating(IRP, A) {} 4690 4691 void trackStatistics() const override { 4692 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 4693 } 4694 }; 4695 4696 /// ----------------------- Heap-To-Stack Conversion --------------------------- 4697 struct AAHeapToStackImpl : public AAHeapToStack { 4698 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) 4699 : AAHeapToStack(IRP, A) {} 4700 4701 const std::string getAsStr() const override { 4702 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); 4703 } 4704 4705 ChangeStatus manifest(Attributor &A) override { 4706 assert(getState().isValidState() && 4707 "Attempted to manifest an invalid state!"); 4708 4709 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 4710 Function *F = getAnchorScope(); 4711 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4712 4713 for (Instruction *MallocCall : MallocCalls) { 4714 // This malloc cannot be replaced. 4715 if (BadMallocCalls.count(MallocCall)) 4716 continue; 4717 4718 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { 4719 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 4720 A.deleteAfterManifest(*FreeCall); 4721 HasChanged = ChangeStatus::CHANGED; 4722 } 4723 4724 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall 4725 << "\n"); 4726 4727 Align Alignment; 4728 Constant *Size; 4729 if (isCallocLikeFn(MallocCall, TLI)) { 4730 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); 4731 auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); 4732 APInt TotalSize = SizeT->getValue() * Num->getValue(); 4733 Size = 4734 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); 4735 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { 4736 Size = cast<ConstantInt>(MallocCall->getOperand(1)); 4737 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) 4738 ->getValue() 4739 .getZExtValue()) 4740 .valueOrOne(); 4741 } else { 4742 Size = cast<ConstantInt>(MallocCall->getOperand(0)); 4743 } 4744 4745 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); 4746 Instruction *AI = 4747 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 4748 "", MallocCall->getNextNode()); 4749 4750 if (AI->getType() != MallocCall->getType()) 4751 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", 4752 AI->getNextNode()); 4753 4754 A.changeValueAfterManifest(*MallocCall, *AI); 4755 4756 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { 4757 auto *NBB = II->getNormalDest(); 4758 BranchInst::Create(NBB, MallocCall->getParent()); 4759 A.deleteAfterManifest(*MallocCall); 4760 } else { 4761 A.deleteAfterManifest(*MallocCall); 4762 } 4763 4764 // Zero out the allocated memory if it was a calloc. 
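// As a rough sketch (names and sizes illustrative, assuming calloc(4, 8)):
//   %h = call i8* @calloc(i64 4, i64 8)
// was rewritten above into
//   %h = alloca i8, i64 32
// and the code below additionally emits
//   call void @llvm.memset.p0i8.i64(i8* %h, i8 0, i64 32, i1 false)
// so the zero-initialization guaranteed by calloc is preserved.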
4765 if (isCallocLikeFn(MallocCall, TLI)) { 4766 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc", 4767 AI->getNextNode()); 4768 Value *Ops[] = { 4769 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 4770 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 4771 4772 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()}; 4773 Module *M = F->getParent(); 4774 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 4775 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 4776 } 4777 HasChanged = ChangeStatus::CHANGED; 4778 } 4779 4780 return HasChanged; 4781 } 4782 4783 /// Collection of all malloc calls in a function. 4784 SmallSetVector<Instruction *, 4> MallocCalls; 4785 4786 /// Collection of malloc calls that cannot be converted. 4787 DenseSet<const Instruction *> BadMallocCalls; 4788 4789 /// A map for each malloc call to the set of associated free calls. 4790 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc; 4791 4792 ChangeStatus updateImpl(Attributor &A) override; 4793 }; 4794 4795 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) { 4796 const Function *F = getAnchorScope(); 4797 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4798 4799 MustBeExecutedContextExplorer &Explorer = 4800 A.getInfoCache().getMustBeExecutedContextExplorer(); 4801 4802 auto FreeCheck = [&](Instruction &I) { 4803 const auto &Frees = FreesForMalloc.lookup(&I); 4804 if (Frees.size() != 1) 4805 return false; 4806 Instruction *UniqueFree = *Frees.begin(); 4807 return Explorer.findInContextOf(UniqueFree, I.getNextNode()); 4808 }; 4809 4810 auto UsesCheck = [&](Instruction &I) { 4811 bool ValidUsesOnly = true; 4812 bool MustUse = true; 4813 auto Pred = [&](const Use &U, bool &Follow) -> bool { 4814 Instruction *UserI = cast<Instruction>(U.getUser()); 4815 if (isa<LoadInst>(UserI)) 4816 return true; 4817 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 4818 if (SI->getValueOperand() == U.get()) { 4819 LLVM_DEBUG(dbgs() 4820 << "[H2S] escaping store to memory: " << *UserI << "\n"); 4821 ValidUsesOnly = false; 4822 } else { 4823 // A store into the malloc'ed memory is fine. 4824 } 4825 return true; 4826 } 4827 if (auto *CB = dyn_cast<CallBase>(UserI)) { 4828 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 4829 return true; 4830 // Record malloc. 4831 if (isFreeCall(UserI, TLI)) { 4832 if (MustUse) { 4833 FreesForMalloc[&I].insert(UserI); 4834 } else { 4835 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: " 4836 << *UserI << "\n"); 4837 ValidUsesOnly = false; 4838 } 4839 return true; 4840 } 4841 4842 unsigned ArgNo = CB->getArgOperandNo(&U); 4843 4844 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 4845 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4846 4847 // If a callsite argument use is nofree, we are fine. 4848 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 4849 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4850 4851 if (!NoCaptureAA.isAssumedNoCapture() || 4852 !ArgNoFreeAA.isAssumedNoFree()) { 4853 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 4854 ValidUsesOnly = false; 4855 } 4856 return true; 4857 } 4858 4859 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 4860 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 4861 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI)); 4862 Follow = true; 4863 return true; 4864 } 4865 // Unknown user for which we can not track uses further (in a way that 4866 // makes sense). 
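// (For instance, an icmp or ptrtoint user of the allocation ends up here
// and conservatively invalidates the conversion.)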
4867 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 4868 ValidUsesOnly = false; 4869 return true; 4870 }; 4871 A.checkForAllUses(Pred, *this, I); 4872 return ValidUsesOnly; 4873 }; 4874 4875 auto MallocCallocCheck = [&](Instruction &I) { 4876 if (BadMallocCalls.count(&I)) 4877 return true; 4878 4879 bool IsMalloc = isMallocLikeFn(&I, TLI); 4880 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); 4881 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); 4882 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { 4883 BadMallocCalls.insert(&I); 4884 return true; 4885 } 4886 4887 if (IsMalloc) { 4888 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) 4889 if (Size->getValue().ule(MaxHeapToStackSize)) 4890 if (UsesCheck(I) || FreeCheck(I)) { 4891 MallocCalls.insert(&I); 4892 return true; 4893 } 4894 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { 4895 // Only if the alignment and sizes are constant. 4896 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4897 if (Size->getValue().ule(MaxHeapToStackSize)) 4898 if (UsesCheck(I) || FreeCheck(I)) { 4899 MallocCalls.insert(&I); 4900 return true; 4901 } 4902 } else if (IsCalloc) { 4903 bool Overflow = false; 4904 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) 4905 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4906 if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) 4907 .ule(MaxHeapToStackSize)) 4908 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { 4909 MallocCalls.insert(&I); 4910 return true; 4911 } 4912 } 4913 4914 BadMallocCalls.insert(&I); 4915 return true; 4916 }; 4917 4918 size_t NumBadMallocs = BadMallocCalls.size(); 4919 4920 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); 4921 4922 if (NumBadMallocs != BadMallocCalls.size()) 4923 return ChangeStatus::CHANGED; 4924 4925 return ChangeStatus::UNCHANGED; 4926 } 4927 4928 struct AAHeapToStackFunction final : public AAHeapToStackImpl { 4929 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 4930 : AAHeapToStackImpl(IRP, A) {} 4931 4932 /// See AbstractAttribute::trackStatistics(). 4933 void trackStatistics() const override { 4934 STATS_DECL( 4935 MallocCalls, Function, 4936 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 4937 for (auto *C : MallocCalls) 4938 if (!BadMallocCalls.count(C)) 4939 ++BUILD_STAT_NAME(MallocCalls, Function); 4940 } 4941 }; 4942 4943 /// ----------------------- Privatizable Pointers ------------------------------ 4944 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 4945 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 4946 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 4947 4948 ChangeStatus indicatePessimisticFixpoint() override { 4949 AAPrivatizablePtr::indicatePessimisticFixpoint(); 4950 PrivatizableType = nullptr; 4951 return ChangeStatus::CHANGED; 4952 } 4953 4954 /// Identify the type we can chose for a private copy of the underlying 4955 /// argument. None means it is not clear yet, nullptr means there is none. 4956 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 4957 4958 /// Return a privatizable type that encloses both T0 and T1. 4959 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
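/// For example (illustrative): combining None with a type T yields T,
/// combining T with T yields T, and combining two different concrete types
/// yields nullptr, i.e., the position is treated as not privatizable.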
4960 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 4961 if (!T0.hasValue()) 4962 return T1; 4963 if (!T1.hasValue()) 4964 return T0; 4965 if (T0 == T1) 4966 return T0; 4967 return nullptr; 4968 } 4969 4970 Optional<Type *> getPrivatizableType() const override { 4971 return PrivatizableType; 4972 } 4973 4974 const std::string getAsStr() const override { 4975 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 4976 } 4977 4978 protected: 4979 Optional<Type *> PrivatizableType; 4980 }; 4981 4982 // TODO: Do this for call site arguments (probably also other values) as well. 4983 4984 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 4985 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 4986 : AAPrivatizablePtrImpl(IRP, A) {} 4987 4988 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 4989 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 4990 // If this is a byval argument and we know all the call sites (so we can 4991 // rewrite them), there is no need to check them explicitly. 4992 bool AllCallSitesKnown; 4993 if (getIRPosition().hasAttr(Attribute::ByVal) && 4994 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 4995 true, AllCallSitesKnown)) 4996 return getAssociatedValue().getType()->getPointerElementType(); 4997 4998 Optional<Type *> Ty; 4999 unsigned ArgNo = getIRPosition().getArgNo(); 5000 5001 // Make sure the associated call site argument has the same type at all call 5002 // sites and it is an allocation we know is safe to privatize, for now that 5003 // means we only allow alloca instructions. 5004 // TODO: We can additionally analyze the accesses in the callee to create 5005 // the type from that information instead. That is a little more 5006 // involved and will be done in a follow up patch. 5007 auto CallSiteCheck = [&](AbstractCallSite ACS) { 5008 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 5009 // Check if a coresponding argument was found or if it is one not 5010 // associated (which can happen for callback calls). 5011 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 5012 return false; 5013 5014 // Check that all call sites agree on a type. 5015 auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); 5016 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 5017 5018 LLVM_DEBUG({ 5019 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 5020 if (CSTy.hasValue() && CSTy.getValue()) 5021 CSTy.getValue()->print(dbgs()); 5022 else if (CSTy.hasValue()) 5023 dbgs() << "<nullptr>"; 5024 else 5025 dbgs() << "<none>"; 5026 }); 5027 5028 Ty = combineTypes(Ty, CSTy); 5029 5030 LLVM_DEBUG({ 5031 dbgs() << " : New Type: "; 5032 if (Ty.hasValue() && Ty.getValue()) 5033 Ty.getValue()->print(dbgs()); 5034 else if (Ty.hasValue()) 5035 dbgs() << "<nullptr>"; 5036 else 5037 dbgs() << "<none>"; 5038 dbgs() << "\n"; 5039 }); 5040 5041 return !Ty.hasValue() || Ty.getValue(); 5042 }; 5043 5044 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) 5045 return nullptr; 5046 return Ty; 5047 } 5048 5049 /// See AbstractAttribute::updateImpl(...). 
5050 ChangeStatus updateImpl(Attributor &A) override { 5051 PrivatizableType = identifyPrivatizableType(A); 5052 if (!PrivatizableType.hasValue()) 5053 return ChangeStatus::UNCHANGED; 5054 if (!PrivatizableType.getValue()) 5055 return indicatePessimisticFixpoint(); 5056 5057 // The dependence is optional so we don't give up once we give up on the 5058 // alignment. 5059 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 5060 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5061 5062 // Avoid arguments with padding for now. 5063 if (!getIRPosition().hasAttr(Attribute::ByVal) && 5064 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 5065 A.getInfoCache().getDL())) { 5066 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 5067 return indicatePessimisticFixpoint(); 5068 } 5069 5070 // Verify callee and caller agree on how the promoted argument would be 5071 // passed. 5072 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a 5073 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible 5074 // which doesn't require the arguments ArgumentPromotion wanted to pass. 5075 Function &Fn = *getIRPosition().getAnchorScope(); 5076 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; 5077 ArgsToPromote.insert(getAssociatedArgument()); 5078 const auto *TTI = 5079 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 5080 if (!TTI || 5081 !ArgumentPromotionPass::areFunctionArgsABICompatible( 5082 Fn, *TTI, ArgsToPromote, Dummy) || 5083 ArgsToPromote.empty()) { 5084 LLVM_DEBUG( 5085 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 5086 << Fn.getName() << "\n"); 5087 return indicatePessimisticFixpoint(); 5088 } 5089 5090 // Collect the types that will replace the privatizable type in the function 5091 // signature. 5092 SmallVector<Type *, 16> ReplacementTypes; 5093 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5094 5095 // Register a rewrite of the argument. 5096 Argument *Arg = getAssociatedArgument(); 5097 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 5098 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 5099 return indicatePessimisticFixpoint(); 5100 } 5101 5102 unsigned ArgNo = Arg->getArgNo(); 5103 5104 // Helper to check if for the given call site the associated argument is 5105 // passed to a callback where the privatization would be different. 
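// Callback call sites arise from !callback metadata on broker functions
// (e.g., __kmpc_fork_call-style runtime calls). The argument reaches the
// callback callee only indirectly through the broker, so the privatization
// type assumed for the callback argument has to agree with the one assumed
// here.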
5106 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { 5107 SmallVector<const Use *, 4> CallbackUses; 5108 AbstractCallSite::getCallbackUses(CB, CallbackUses); 5109 for (const Use *U : CallbackUses) { 5110 AbstractCallSite CBACS(U); 5111 assert(CBACS && CBACS.isCallbackCall()); 5112 for (Argument &CBArg : CBACS.getCalledFunction()->args()) { 5113 int CBArgNo = CBACS.getCallArgOperandNo(CBArg); 5114 5115 LLVM_DEBUG({ 5116 dbgs() 5117 << "[AAPrivatizablePtr] Argument " << *Arg 5118 << "check if can be privatized in the context of its parent (" 5119 << Arg->getParent()->getName() 5120 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5121 "callback (" 5122 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5123 << ")\n[AAPrivatizablePtr] " << CBArg << " : " 5124 << CBACS.getCallArgOperand(CBArg) << " vs " 5125 << CB.getArgOperand(ArgNo) << "\n" 5126 << "[AAPrivatizablePtr] " << CBArg << " : " 5127 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; 5128 }); 5129 5130 if (CBArgNo != int(ArgNo)) 5131 continue; 5132 const auto &CBArgPrivAA = 5133 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg)); 5134 if (CBArgPrivAA.isValidState()) { 5135 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); 5136 if (!CBArgPrivTy.hasValue()) 5137 continue; 5138 if (CBArgPrivTy.getValue() == PrivatizableType) 5139 continue; 5140 } 5141 5142 LLVM_DEBUG({ 5143 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5144 << " cannot be privatized in the context of its parent (" 5145 << Arg->getParent()->getName() 5146 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5147 "callback (" 5148 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5149 << ").\n[AAPrivatizablePtr] for which the argument " 5150 "privatization is not compatible.\n"; 5151 }); 5152 return false; 5153 } 5154 } 5155 return true; 5156 }; 5157 5158 // Helper to check if for the given call site the associated argument is 5159 // passed to a direct call where the privatization would be different. 
5160 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { 5161 CallBase *DC = cast<CallBase>(ACS.getInstruction()); 5162 int DCArgNo = ACS.getCallArgOperandNo(ArgNo); 5163 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && 5164 "Expected a direct call operand for callback call operand"); 5165 5166 LLVM_DEBUG({ 5167 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5168 << " check if be privatized in the context of its parent (" 5169 << Arg->getParent()->getName() 5170 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5171 "direct call of (" 5172 << DCArgNo << "@" << DC->getCalledFunction()->getName() 5173 << ").\n"; 5174 }); 5175 5176 Function *DCCallee = DC->getCalledFunction(); 5177 if (unsigned(DCArgNo) < DCCallee->arg_size()) { 5178 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( 5179 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo))); 5180 if (DCArgPrivAA.isValidState()) { 5181 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); 5182 if (!DCArgPrivTy.hasValue()) 5183 return true; 5184 if (DCArgPrivTy.getValue() == PrivatizableType) 5185 return true; 5186 } 5187 } 5188 5189 LLVM_DEBUG({ 5190 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5191 << " cannot be privatized in the context of its parent (" 5192 << Arg->getParent()->getName() 5193 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5194 "direct call of (" 5195 << ACS.getInstruction()->getCalledFunction()->getName() 5196 << ").\n[AAPrivatizablePtr] for which the argument " 5197 "privatization is not compatible.\n"; 5198 }); 5199 return false; 5200 }; 5201 5202 // Helper to check if the associated argument is used at the given abstract 5203 // call site in a way that is incompatible with the privatization assumed 5204 // here. 5205 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { 5206 if (ACS.isDirectCall()) 5207 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); 5208 if (ACS.isCallbackCall()) 5209 return IsCompatiblePrivArgOfDirectCS(ACS); 5210 return false; 5211 }; 5212 5213 bool AllCallSitesKnown; 5214 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true, 5215 AllCallSitesKnown)) 5216 return indicatePessimisticFixpoint(); 5217 5218 return ChangeStatus::UNCHANGED; 5219 } 5220 5221 /// Given a type to private \p PrivType, collect the constituates (which are 5222 /// used) in \p ReplacementTypes. 5223 static void 5224 identifyReplacementTypes(Type *PrivType, 5225 SmallVectorImpl<Type *> &ReplacementTypes) { 5226 // TODO: For now we expand the privatization type to the fullest which can 5227 // lead to dead arguments that need to be removed later. 5228 assert(PrivType && "Expected privatizable type!"); 5229 5230 // Traverse the type, extract constituate types on the outermost level. 5231 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5232 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) 5233 ReplacementTypes.push_back(PrivStructType->getElementType(u)); 5234 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5235 ReplacementTypes.append(PrivArrayType->getNumElements(), 5236 PrivArrayType->getElementType()); 5237 } else { 5238 ReplacementTypes.push_back(PrivType); 5239 } 5240 } 5241 5242 /// Initialize \p Base according to the type \p PrivType at position \p IP. 5243 /// The values needed are taken from the arguments of \p F starting at 5244 /// position \p ArgNo. 
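/// For example (illustrative), for PrivType == { i32, i64 } this emits, at
/// \p IP, pointers into \p Base at the two struct member offsets and stores
/// F.getArg(ArgNo) and F.getArg(ArgNo + 1) through them.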
5245 static void createInitialization(Type *PrivType, Value &Base, Function &F, 5246 unsigned ArgNo, Instruction &IP) { 5247 assert(PrivType && "Expected privatizable type!"); 5248 5249 IRBuilder<NoFolder> IRB(&IP); 5250 const DataLayout &DL = F.getParent()->getDataLayout(); 5251 5252 // Traverse the type, build GEPs and stores. 5253 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5254 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5255 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5256 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 5257 Value *Ptr = constructPointer( 5258 PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL); 5259 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5260 } 5261 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5262 Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo(); 5263 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy); 5264 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5265 Value *Ptr = 5266 constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL); 5267 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5268 } 5269 } else { 5270 new StoreInst(F.getArg(ArgNo), &Base, &IP); 5271 } 5272 } 5273 5274 /// Extract values from \p Base according to the type \p PrivType at the 5275 /// call position \p ACS. The values are appended to \p ReplacementValues. 5276 void createReplacementValues(Align Alignment, Type *PrivType, 5277 AbstractCallSite ACS, Value *Base, 5278 SmallVectorImpl<Value *> &ReplacementValues) { 5279 assert(Base && "Expected base value!"); 5280 assert(PrivType && "Expected privatizable type!"); 5281 Instruction *IP = ACS.getInstruction(); 5282 5283 IRBuilder<NoFolder> IRB(IP); 5284 const DataLayout &DL = IP->getModule()->getDataLayout(); 5285 5286 if (Base->getType()->getPointerElementType() != PrivType) 5287 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 5288 "", ACS.getInstruction()); 5289 5290 // Traverse the type, build GEPs and loads. 5291 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5292 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5293 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5294 Type *PointeeTy = PrivStructType->getElementType(u); 5295 Value *Ptr = 5296 constructPointer(PointeeTy->getPointerTo(), Base, 5297 PrivStructLayout->getElementOffset(u), IRB, DL); 5298 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 5299 L->setAlignment(Alignment); 5300 ReplacementValues.push_back(L); 5301 } 5302 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5303 Type *PointeeTy = PrivArrayType->getElementType(); 5304 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 5305 Type *PointeePtrTy = PointeeTy->getPointerTo(); 5306 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5307 Value *Ptr = 5308 constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL); 5309 LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP); 5310 L->setAlignment(Alignment); 5311 ReplacementValues.push_back(L); 5312 } 5313 } else { 5314 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 5315 L->setAlignment(Alignment); 5316 ReplacementValues.push_back(L); 5317 } 5318 } 5319 5320 /// See AbstractAttribute::manifest(...) 
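/// A rough sketch of the overall rewrite (all names illustrative): for an
/// argument `%struct.S* %p` with PrivatizableType == { i32, i64 }, the
/// callee is rewritten to take the members by value,
///   define void @fn(i32 %p.0, i64 %p.1) {
///     %p.priv = alloca %struct.S
///     ; %p.0 and %p.1 are stored into %p.priv, which then replaces %p
///     ...
/// while every call site loads the members from the pointer it used to pass
/// and forwards them as the new arguments.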
5321 ChangeStatus manifest(Attributor &A) override { 5322 if (!PrivatizableType.hasValue()) 5323 return ChangeStatus::UNCHANGED; 5324 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 5325 5326 // Collect all tail calls in the function as we cannot allow new allocas to 5327 // escape into tail recursion. 5328 // TODO: Be smarter about new allocas escaping into tail calls. 5329 SmallVector<CallInst *, 16> TailCalls; 5330 if (!A.checkForAllInstructions( 5331 [&](Instruction &I) { 5332 CallInst &CI = cast<CallInst>(I); 5333 if (CI.isTailCall()) 5334 TailCalls.push_back(&CI); 5335 return true; 5336 }, 5337 *this, {Instruction::Call})) 5338 return ChangeStatus::UNCHANGED; 5339 5340 Argument *Arg = getAssociatedArgument(); 5341 // Query AAAlign attribute for alignment of associated argument to 5342 // determine the best alignment of loads. 5343 const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg)); 5344 5345 // Callback to repair the associated function. A new alloca is placed at the 5346 // beginning and initialized with the values passed through arguments. The 5347 // new alloca replaces the use of the old pointer argument. 5348 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 5349 [=](const Attributor::ArgumentReplacementInfo &ARI, 5350 Function &ReplacementFn, Function::arg_iterator ArgIt) { 5351 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 5352 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 5353 auto *AI = new AllocaInst(PrivatizableType.getValue(), 0, 5354 Arg->getName() + ".priv", IP); 5355 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 5356 ArgIt->getArgNo(), *IP); 5357 Arg->replaceAllUsesWith(AI); 5358 5359 for (CallInst *CI : TailCalls) 5360 CI->setTailCall(false); 5361 }; 5362 5363 // Callback to repair a call site of the associated function. The elements 5364 // of the privatizable type are loaded prior to the call and passed to the 5365 // new function version. 5366 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 5367 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 5368 AbstractCallSite ACS, 5369 SmallVectorImpl<Value *> &NewArgOperands) { 5370 // When no alignment is specified for the load instruction, 5371 // natural alignment is assumed. 5372 createReplacementValues( 5373 assumeAligned(AlignAA.getAssumedAlign()), 5374 PrivatizableType.getValue(), ACS, 5375 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 5376 NewArgOperands); 5377 }; 5378 5379 // Collect the types that will replace the privatizable type in the function 5380 // signature. 5381 SmallVector<Type *, 16> ReplacementTypes; 5382 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5383 5384 // Register a rewrite of the argument. 5385 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 5386 std::move(FnRepairCB), 5387 std::move(ACSRepairCB))) 5388 return ChangeStatus::CHANGED; 5389 return ChangeStatus::UNCHANGED; 5390 } 5391 5392 /// See AbstractAttribute::trackStatistics() 5393 void trackStatistics() const override { 5394 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 5395 } 5396 }; 5397 5398 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 5399 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 5400 : AAPrivatizablePtrImpl(IRP, A) {} 5401 5402 /// See AbstractAttribute::initialize(...). 5403 virtual void initialize(Attributor &A) override { 5404 // TODO: We can privatize more than arguments. 
5405 indicatePessimisticFixpoint(); 5406 } 5407 5408 ChangeStatus updateImpl(Attributor &A) override { 5409 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 5410 "updateImpl will not be called"); 5411 } 5412 5413 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5414 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5415 Value *Obj = 5416 GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL()); 5417 if (!Obj) { 5418 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 5419 return nullptr; 5420 } 5421 5422 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 5423 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 5424 if (CI->isOne()) 5425 return Obj->getType()->getPointerElementType(); 5426 if (auto *Arg = dyn_cast<Argument>(Obj)) { 5427 auto &PrivArgAA = 5428 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg)); 5429 if (PrivArgAA.isAssumedPrivatizablePtr()) 5430 return Obj->getType()->getPointerElementType(); 5431 } 5432 5433 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 5434 "alloca nor privatizable argument: " 5435 << *Obj << "!\n"); 5436 return nullptr; 5437 } 5438 5439 /// See AbstractAttribute::trackStatistics() 5440 void trackStatistics() const override { 5441 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 5442 } 5443 }; 5444 5445 struct AAPrivatizablePtrCallSiteArgument final 5446 : public AAPrivatizablePtrFloating { 5447 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 5448 : AAPrivatizablePtrFloating(IRP, A) {} 5449 5450 /// See AbstractAttribute::initialize(...). 5451 void initialize(Attributor &A) override { 5452 if (getIRPosition().hasAttr(Attribute::ByVal)) 5453 indicateOptimisticFixpoint(); 5454 } 5455 5456 /// See AbstractAttribute::updateImpl(...). 5457 ChangeStatus updateImpl(Attributor &A) override { 5458 PrivatizableType = identifyPrivatizableType(A); 5459 if (!PrivatizableType.hasValue()) 5460 return ChangeStatus::UNCHANGED; 5461 if (!PrivatizableType.getValue()) 5462 return indicatePessimisticFixpoint(); 5463 5464 const IRPosition &IRP = getIRPosition(); 5465 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP); 5466 if (!NoCaptureAA.isAssumedNoCapture()) { 5467 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 5468 return indicatePessimisticFixpoint(); 5469 } 5470 5471 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP); 5472 if (!NoAliasAA.isAssumedNoAlias()) { 5473 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 5474 return indicatePessimisticFixpoint(); 5475 } 5476 5477 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP); 5478 if (!MemBehaviorAA.isAssumedReadOnly()) { 5479 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 5480 return indicatePessimisticFixpoint(); 5481 } 5482 5483 return ChangeStatus::UNCHANGED; 5484 } 5485 5486 /// See AbstractAttribute::trackStatistics() 5487 void trackStatistics() const override { 5488 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 5489 } 5490 }; 5491 5492 struct AAPrivatizablePtrCallSiteReturned final 5493 : public AAPrivatizablePtrFloating { 5494 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 5495 : AAPrivatizablePtrFloating(IRP, A) {} 5496 5497 /// See AbstractAttribute::initialize(...). 5498 void initialize(Attributor &A) override { 5499 // TODO: We can privatize more than arguments. 
5500 indicatePessimisticFixpoint(); 5501 } 5502 5503 /// See AbstractAttribute::trackStatistics() 5504 void trackStatistics() const override { 5505 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 5506 } 5507 }; 5508 5509 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 5510 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 5511 : AAPrivatizablePtrFloating(IRP, A) {} 5512 5513 /// See AbstractAttribute::initialize(...). 5514 void initialize(Attributor &A) override { 5515 // TODO: We can privatize more than arguments. 5516 indicatePessimisticFixpoint(); 5517 } 5518 5519 /// See AbstractAttribute::trackStatistics() 5520 void trackStatistics() const override { 5521 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 5522 } 5523 }; 5524 5525 /// -------------------- Memory Behavior Attributes ---------------------------- 5526 /// Includes read-none, read-only, and write-only. 5527 /// ---------------------------------------------------------------------------- 5528 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 5529 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 5530 : AAMemoryBehavior(IRP, A) {} 5531 5532 /// See AbstractAttribute::initialize(...). 5533 void initialize(Attributor &A) override { 5534 intersectAssumedBits(BEST_STATE); 5535 getKnownStateFromValue(getIRPosition(), getState()); 5536 IRAttribute::initialize(A); 5537 } 5538 5539 /// Return the memory behavior information encoded in the IR for \p IRP. 5540 static void getKnownStateFromValue(const IRPosition &IRP, 5541 BitIntegerState &State, 5542 bool IgnoreSubsumingPositions = false) { 5543 SmallVector<Attribute, 2> Attrs; 5544 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 5545 for (const Attribute &Attr : Attrs) { 5546 switch (Attr.getKindAsEnum()) { 5547 case Attribute::ReadNone: 5548 State.addKnownBits(NO_ACCESSES); 5549 break; 5550 case Attribute::ReadOnly: 5551 State.addKnownBits(NO_WRITES); 5552 break; 5553 case Attribute::WriteOnly: 5554 State.addKnownBits(NO_READS); 5555 break; 5556 default: 5557 llvm_unreachable("Unexpected attribute!"); 5558 } 5559 } 5560 5561 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 5562 if (!I->mayReadFromMemory()) 5563 State.addKnownBits(NO_READS); 5564 if (!I->mayWriteToMemory()) 5565 State.addKnownBits(NO_WRITES); 5566 } 5567 } 5568 5569 /// See AbstractAttribute::getDeducedAttributes(...). 5570 void getDeducedAttributes(LLVMContext &Ctx, 5571 SmallVectorImpl<Attribute> &Attrs) const override { 5572 assert(Attrs.size() == 0); 5573 if (isAssumedReadNone()) 5574 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 5575 else if (isAssumedReadOnly()) 5576 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 5577 else if (isAssumedWriteOnly()) 5578 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 5579 assert(Attrs.size() <= 1); 5580 } 5581 5582 /// See AbstractAttribute::manifest(...). 5583 ChangeStatus manifest(Attributor &A) override { 5584 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 5585 return ChangeStatus::UNCHANGED; 5586 5587 const IRPosition &IRP = getIRPosition(); 5588 5589 // Check if we would improve the existing attributes first. 
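// (E.g., if the position already carries `readonly` and we only deduced
// `readonly` again there is nothing to do; deducing `readnone` for a
// `readonly` position is an improvement and is manifested below.)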
5590 SmallVector<Attribute, 4> DeducedAttrs; 5591 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 5592 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 5593 return IRP.hasAttr(Attr.getKindAsEnum(), 5594 /* IgnoreSubsumingPositions */ true); 5595 })) 5596 return ChangeStatus::UNCHANGED; 5597 5598 // Clear existing attributes. 5599 IRP.removeAttrs(AttrKinds); 5600 5601 // Use the generic manifest method. 5602 return IRAttribute::manifest(A); 5603 } 5604 5605 /// See AbstractState::getAsStr(). 5606 const std::string getAsStr() const override { 5607 if (isAssumedReadNone()) 5608 return "readnone"; 5609 if (isAssumedReadOnly()) 5610 return "readonly"; 5611 if (isAssumedWriteOnly()) 5612 return "writeonly"; 5613 return "may-read/write"; 5614 } 5615 5616 /// The set of IR attributes AAMemoryBehavior deals with. 5617 static const Attribute::AttrKind AttrKinds[3]; 5618 }; 5619 5620 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 5621 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 5622 5623 /// Memory behavior attribute for a floating value. 5624 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 5625 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 5626 : AAMemoryBehaviorImpl(IRP, A) {} 5627 5628 /// See AbstractAttribute::initialize(...). 5629 void initialize(Attributor &A) override { 5630 AAMemoryBehaviorImpl::initialize(A); 5631 // Initialize the use vector with all direct uses of the associated value. 5632 for (const Use &U : getAssociatedValue().uses()) 5633 Uses.insert(&U); 5634 } 5635 5636 /// See AbstractAttribute::updateImpl(...). 5637 ChangeStatus updateImpl(Attributor &A) override; 5638 5639 /// See AbstractAttribute::trackStatistics() 5640 void trackStatistics() const override { 5641 if (isAssumedReadNone()) 5642 STATS_DECLTRACK_FLOATING_ATTR(readnone) 5643 else if (isAssumedReadOnly()) 5644 STATS_DECLTRACK_FLOATING_ATTR(readonly) 5645 else if (isAssumedWriteOnly()) 5646 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 5647 } 5648 5649 private: 5650 /// Return true if users of \p UserI might access the underlying 5651 /// variable/location described by \p U and should therefore be analyzed. 5652 bool followUsersOfUseIn(Attributor &A, const Use *U, 5653 const Instruction *UserI); 5654 5655 /// Update the state according to the effect of use \p U in \p UserI. 5656 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI); 5657 5658 protected: 5659 /// Container for (transitive) uses of the associated argument. 5660 SetVector<const Use *> Uses; 5661 }; 5662 5663 /// Memory behavior attribute for function argument. 5664 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 5665 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 5666 : AAMemoryBehaviorFloating(IRP, A) {} 5667 5668 /// See AbstractAttribute::initialize(...). 5669 void initialize(Attributor &A) override { 5670 intersectAssumedBits(BEST_STATE); 5671 const IRPosition &IRP = getIRPosition(); 5672 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 5673 // can query it when we use has/getAttr. That would allow us to reuse the 5674 // initialize of the base class here. 5675 bool HasByVal = 5676 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 5677 getKnownStateFromValue(IRP, getState(), 5678 /* IgnoreSubsumingPositions */ HasByVal); 5679 5680 // Initialize the use vector with all direct uses of the associated value. 
5681 Argument *Arg = getAssociatedArgument(); 5682 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) { 5683 indicatePessimisticFixpoint(); 5684 } else { 5685 // Initialize the use vector with all direct uses of the associated value. 5686 for (const Use &U : Arg->uses()) 5687 Uses.insert(&U); 5688 } 5689 } 5690 5691 ChangeStatus manifest(Attributor &A) override { 5692 // TODO: Pointer arguments are not supported on vectors of pointers yet. 5693 if (!getAssociatedValue().getType()->isPointerTy()) 5694 return ChangeStatus::UNCHANGED; 5695 5696 // TODO: From readattrs.ll: "inalloca parameters are always 5697 // considered written" 5698 if (hasAttr({Attribute::InAlloca})) { 5699 removeKnownBits(NO_WRITES); 5700 removeAssumedBits(NO_WRITES); 5701 } 5702 return AAMemoryBehaviorFloating::manifest(A); 5703 } 5704 5705 /// See AbstractAttribute::trackStatistics() 5706 void trackStatistics() const override { 5707 if (isAssumedReadNone()) 5708 STATS_DECLTRACK_ARG_ATTR(readnone) 5709 else if (isAssumedReadOnly()) 5710 STATS_DECLTRACK_ARG_ATTR(readonly) 5711 else if (isAssumedWriteOnly()) 5712 STATS_DECLTRACK_ARG_ATTR(writeonly) 5713 } 5714 }; 5715 5716 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 5717 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 5718 : AAMemoryBehaviorArgument(IRP, A) {} 5719 5720 /// See AbstractAttribute::initialize(...). 5721 void initialize(Attributor &A) override { 5722 if (Argument *Arg = getAssociatedArgument()) { 5723 if (Arg->hasByValAttr()) { 5724 addKnownBits(NO_WRITES); 5725 removeKnownBits(NO_READS); 5726 removeAssumedBits(NO_READS); 5727 } 5728 } 5729 AAMemoryBehaviorArgument::initialize(A); 5730 } 5731 5732 /// See AbstractAttribute::updateImpl(...). 5733 ChangeStatus updateImpl(Attributor &A) override { 5734 // TODO: Once we have call site specific value information we can provide 5735 // call site specific liveness liveness information and then it makes 5736 // sense to specialize attributes for call sites arguments instead of 5737 // redirecting requests to the callee argument. 5738 Argument *Arg = getAssociatedArgument(); 5739 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5740 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos); 5741 return clampStateAndIndicateChange( 5742 getState(), 5743 static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState())); 5744 } 5745 5746 /// See AbstractAttribute::trackStatistics() 5747 void trackStatistics() const override { 5748 if (isAssumedReadNone()) 5749 STATS_DECLTRACK_CSARG_ATTR(readnone) 5750 else if (isAssumedReadOnly()) 5751 STATS_DECLTRACK_CSARG_ATTR(readonly) 5752 else if (isAssumedWriteOnly()) 5753 STATS_DECLTRACK_CSARG_ATTR(writeonly) 5754 } 5755 }; 5756 5757 /// Memory behavior attribute for a call site return position. 5758 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { 5759 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) 5760 : AAMemoryBehaviorFloating(IRP, A) {} 5761 5762 /// See AbstractAttribute::manifest(...). 5763 ChangeStatus manifest(Attributor &A) override { 5764 // We do not annotate returned values. 5765 return ChangeStatus::UNCHANGED; 5766 } 5767 5768 /// See AbstractAttribute::trackStatistics() 5769 void trackStatistics() const override {} 5770 }; 5771 5772 /// An AA to represent the memory behavior function attributes. 
5773 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { 5774 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) 5775 : AAMemoryBehaviorImpl(IRP, A) {} 5776 5777 /// See AbstractAttribute::updateImpl(Attributor &A). 5778 virtual ChangeStatus updateImpl(Attributor &A) override; 5779 5780 /// See AbstractAttribute::manifest(...). 5781 ChangeStatus manifest(Attributor &A) override { 5782 Function &F = cast<Function>(getAnchorValue()); 5783 if (isAssumedReadNone()) { 5784 F.removeFnAttr(Attribute::ArgMemOnly); 5785 F.removeFnAttr(Attribute::InaccessibleMemOnly); 5786 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); 5787 } 5788 return AAMemoryBehaviorImpl::manifest(A); 5789 } 5790 5791 /// See AbstractAttribute::trackStatistics() 5792 void trackStatistics() const override { 5793 if (isAssumedReadNone()) 5794 STATS_DECLTRACK_FN_ATTR(readnone) 5795 else if (isAssumedReadOnly()) 5796 STATS_DECLTRACK_FN_ATTR(readonly) 5797 else if (isAssumedWriteOnly()) 5798 STATS_DECLTRACK_FN_ATTR(writeonly) 5799 } 5800 }; 5801 5802 /// AAMemoryBehavior attribute for call sites. 5803 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl { 5804 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) 5805 : AAMemoryBehaviorImpl(IRP, A) {} 5806 5807 /// See AbstractAttribute::initialize(...). 5808 void initialize(Attributor &A) override { 5809 AAMemoryBehaviorImpl::initialize(A); 5810 Function *F = getAssociatedFunction(); 5811 if (!F || !A.isFunctionIPOAmendable(*F)) { 5812 indicatePessimisticFixpoint(); 5813 return; 5814 } 5815 } 5816 5817 /// See AbstractAttribute::updateImpl(...). 5818 ChangeStatus updateImpl(Attributor &A) override { 5819 // TODO: Once we have call site specific value information we can provide 5820 // call site specific liveness liveness information and then it makes 5821 // sense to specialize attributes for call sites arguments instead of 5822 // redirecting requests to the callee argument. 5823 Function *F = getAssociatedFunction(); 5824 const IRPosition &FnPos = IRPosition::function(*F); 5825 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos); 5826 return clampStateAndIndicateChange( 5827 getState(), 5828 static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState())); 5829 } 5830 5831 /// See AbstractAttribute::trackStatistics() 5832 void trackStatistics() const override { 5833 if (isAssumedReadNone()) 5834 STATS_DECLTRACK_CS_ATTR(readnone) 5835 else if (isAssumedReadOnly()) 5836 STATS_DECLTRACK_CS_ATTR(readonly) 5837 else if (isAssumedWriteOnly()) 5838 STATS_DECLTRACK_CS_ATTR(writeonly) 5839 } 5840 }; 5841 5842 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { 5843 5844 // The current assumed state used to determine a change. 5845 auto AssumedState = getAssumed(); 5846 5847 auto CheckRWInst = [&](Instruction &I) { 5848 // If the instruction has an own memory behavior state, use it to restrict 5849 // the local state. No further analysis is required as the other memory 5850 // state is as optimistic as it gets. 5851 if (const auto *CB = dyn_cast<CallBase>(&I)) { 5852 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 5853 *this, IRPosition::callsite_function(*CB)); 5854 intersectAssumedBits(MemBehaviorAA.getAssumed()); 5855 return !isAtFixpoint(); 5856 } 5857 5858 // Remove access kind modifiers if necessary. 
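// (E.g., a load instruction clears NO_READS and a store clears NO_WRITES
// for the surrounding function.)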
5859 if (I.mayReadFromMemory()) 5860 removeAssumedBits(NO_READS); 5861 if (I.mayWriteToMemory()) 5862 removeAssumedBits(NO_WRITES); 5863 return !isAtFixpoint(); 5864 }; 5865 5866 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 5867 return indicatePessimisticFixpoint(); 5868 5869 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED 5870 : ChangeStatus::UNCHANGED; 5871 } 5872 5873 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) { 5874 5875 const IRPosition &IRP = getIRPosition(); 5876 const IRPosition &FnPos = IRPosition::function_scope(IRP); 5877 AAMemoryBehavior::StateType &S = getState(); 5878 5879 // First, check the function scope. We take the known information and we avoid 5880 // work if the assumed information implies the current assumed information for 5881 // this attribute. This is a valid for all but byval arguments. 5882 Argument *Arg = IRP.getAssociatedArgument(); 5883 AAMemoryBehavior::base_t FnMemAssumedState = 5884 AAMemoryBehavior::StateType::getWorstState(); 5885 if (!Arg || !Arg->hasByValAttr()) { 5886 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>( 5887 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5888 FnMemAssumedState = FnMemAA.getAssumed(); 5889 S.addKnownBits(FnMemAA.getKnown()); 5890 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed()) 5891 return ChangeStatus::UNCHANGED; 5892 } 5893 5894 // Make sure the value is not captured (except through "return"), if 5895 // it is, any information derived would be irrelevant anyway as we cannot 5896 // check the potential aliases introduced by the capture. However, no need 5897 // to fall back to anythign less optimistic than the function state. 5898 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 5899 *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5900 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 5901 S.intersectAssumedBits(FnMemAssumedState); 5902 return ChangeStatus::CHANGED; 5903 } 5904 5905 // The current assumed state used to determine a change. 5906 auto AssumedState = S.getAssumed(); 5907 5908 // Liveness information to exclude dead users. 5909 // TODO: Take the FnPos once we have call site specific liveness information. 5910 const auto &LivenessAA = A.getAAFor<AAIsDead>( 5911 *this, IRPosition::function(*IRP.getAssociatedFunction()), 5912 /* TrackDependence */ false); 5913 5914 // Visit and expand uses until all are analyzed or a fixpoint is reached. 5915 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) { 5916 const Use *U = Uses[i]; 5917 Instruction *UserI = cast<Instruction>(U->getUser()); 5918 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI 5919 << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA)) 5920 << "]\n"); 5921 if (A.isAssumedDead(*U, this, &LivenessAA)) 5922 continue; 5923 5924 // Droppable users, e.g., llvm::assume does not actually perform any action. 5925 if (UserI->isDroppable()) 5926 continue; 5927 5928 // Check if the users of UserI should also be visited. 5929 if (followUsersOfUseIn(A, U, UserI)) 5930 for (const Use &UserIUse : UserI->uses()) 5931 Uses.insert(&UserIUse); 5932 5933 // If UserI might touch memory we analyze the use in detail. 5934 if (UserI->mayReadOrWriteMemory()) 5935 analyzeUseIn(A, U, UserI); 5936 } 5937 5938 return (AssumedState != getAssumed()) ? 
ChangeStatus::CHANGED 5939 : ChangeStatus::UNCHANGED; 5940 } 5941 5942 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U, 5943 const Instruction *UserI) { 5944 // The loaded value is unrelated to the pointer argument, no need to 5945 // follow the users of the load. 5946 if (isa<LoadInst>(UserI)) 5947 return false; 5948 5949 // By default we follow all uses assuming UserI might leak information on U, 5950 // we have special handling for call sites operands though. 5951 const auto *CB = dyn_cast<CallBase>(UserI); 5952 if (!CB || !CB->isArgOperand(U)) 5953 return true; 5954 5955 // If the use is a call argument known not to be captured, the users of 5956 // the call do not need to be visited because they have to be unrelated to 5957 // the input. Note that this check is not trivial even though we disallow 5958 // general capturing of the underlying argument. The reason is that the 5959 // call might the argument "through return", which we allow and for which we 5960 // need to check call users. 5961 if (U->get()->getType()->isPointerTy()) { 5962 unsigned ArgNo = CB->getArgOperandNo(U); 5963 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 5964 *this, IRPosition::callsite_argument(*CB, ArgNo), 5965 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5966 return !ArgNoCaptureAA.isAssumedNoCapture(); 5967 } 5968 5969 return true; 5970 } 5971 5972 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U, 5973 const Instruction *UserI) { 5974 assert(UserI->mayReadOrWriteMemory()); 5975 5976 switch (UserI->getOpcode()) { 5977 default: 5978 // TODO: Handle all atomics and other side-effect operations we know of. 5979 break; 5980 case Instruction::Load: 5981 // Loads cause the NO_READS property to disappear. 5982 removeAssumedBits(NO_READS); 5983 return; 5984 5985 case Instruction::Store: 5986 // Stores cause the NO_WRITES property to disappear if the use is the 5987 // pointer operand. Note that we do assume that capturing was taken care of 5988 // somewhere else. 5989 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get()) 5990 removeAssumedBits(NO_WRITES); 5991 return; 5992 5993 case Instruction::Call: 5994 case Instruction::CallBr: 5995 case Instruction::Invoke: { 5996 // For call sites we look at the argument memory behavior attribute (this 5997 // could be recursive!) in order to restrict our own state. 5998 const auto *CB = cast<CallBase>(UserI); 5999 6000 // Give up on operand bundles. 6001 if (CB->isBundleOperand(U)) { 6002 indicatePessimisticFixpoint(); 6003 return; 6004 } 6005 6006 // Calling a function does read the function pointer, maybe write it if the 6007 // function is self-modifying. 6008 if (CB->isCallee(U)) { 6009 removeAssumedBits(NO_READS); 6010 break; 6011 } 6012 6013 // Adjust the possible access behavior based on the information on the 6014 // argument. 6015 IRPosition Pos; 6016 if (U->get()->getType()->isPointerTy()) 6017 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U)); 6018 else 6019 Pos = IRPosition::callsite_function(*CB); 6020 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6021 *this, Pos, 6022 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6023 // "assumed" has at most the same bits as the MemBehaviorAA assumed 6024 // and at least "known". 6025 intersectAssumedBits(MemBehaviorAA.getAssumed()); 6026 return; 6027 } 6028 }; 6029 6030 // Generally, look at the "may-properties" and adjust the assumed state if we 6031 // did not trigger special handling before. 
6032 if (UserI->mayReadFromMemory()) 6033 removeAssumedBits(NO_READS); 6034 if (UserI->mayWriteToMemory()) 6035 removeAssumedBits(NO_WRITES); 6036 } 6037 6038 } // namespace 6039 6040 /// -------------------- Memory Locations Attributes --------------------------- 6041 /// Includes read-none, argmemonly, inaccessiblememonly, 6042 /// inaccessiblememorargmemonly 6043 /// ---------------------------------------------------------------------------- 6044 6045 std::string AAMemoryLocation::getMemoryLocationsAsStr( 6046 AAMemoryLocation::MemoryLocationsKind MLK) { 6047 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 6048 return "all memory"; 6049 if (MLK == AAMemoryLocation::NO_LOCATIONS) 6050 return "no memory"; 6051 std::string S = "memory:"; 6052 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 6053 S += "stack,"; 6054 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 6055 S += "constant,"; 6056 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 6057 S += "internal global,"; 6058 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 6059 S += "external global,"; 6060 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 6061 S += "argument,"; 6062 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 6063 S += "inaccessible,"; 6064 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 6065 S += "malloced,"; 6066 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 6067 S += "unknown,"; 6068 S.pop_back(); 6069 return S; 6070 } 6071 6072 namespace { 6073 struct AAMemoryLocationImpl : public AAMemoryLocation { 6074 6075 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 6076 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 6077 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6078 AccessKind2Accesses[u] = nullptr; 6079 } 6080 6081 ~AAMemoryLocationImpl() { 6082 // The AccessSets are allocated via a BumpPtrAllocator, we call 6083 // the destructor manually. 6084 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6085 if (AccessKind2Accesses[u]) 6086 AccessKind2Accesses[u]->~AccessSet(); 6087 } 6088 6089 /// See AbstractAttribute::initialize(...). 6090 void initialize(Attributor &A) override { 6091 intersectAssumedBits(BEST_STATE); 6092 getKnownStateFromValue(A, getIRPosition(), getState()); 6093 IRAttribute::initialize(A); 6094 } 6095 6096 /// Return the memory behavior information encoded in the IR for \p IRP. 6097 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 6098 BitIntegerState &State, 6099 bool IgnoreSubsumingPositions = false) { 6100 // For internal functions we ignore `argmemonly` and 6101 // `inaccessiblememorargmemonly` as we might break it via interprocedural 6102 // constant propagation. It is unclear if this is the best way but it is 6103 // unlikely this will cause real performance problems. If we are deriving 6104 // attributes for the anchor function we even remove the attribute in 6105 // addition to ignoring it. 
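// (Illustrative example: if interprocedural constant propagation replaces a
// pointer argument of an internal function with a global, accesses that
// were "argument memory" become global accesses and a previously correct
// `argmemonly` would no longer hold.)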
6106 bool UseArgMemOnly = true; 6107 Function *AnchorFn = IRP.getAnchorScope(); 6108 if (AnchorFn && A.isRunOn(*AnchorFn)) 6109 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 6110 6111 SmallVector<Attribute, 2> Attrs; 6112 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6113 for (const Attribute &Attr : Attrs) { 6114 switch (Attr.getKindAsEnum()) { 6115 case Attribute::ReadNone: 6116 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 6117 break; 6118 case Attribute::InaccessibleMemOnly: 6119 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 6120 break; 6121 case Attribute::ArgMemOnly: 6122 if (UseArgMemOnly) 6123 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 6124 else 6125 IRP.removeAttrs({Attribute::ArgMemOnly}); 6126 break; 6127 case Attribute::InaccessibleMemOrArgMemOnly: 6128 if (UseArgMemOnly) 6129 State.addKnownBits(inverseLocation( 6130 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 6131 else 6132 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 6133 break; 6134 default: 6135 llvm_unreachable("Unexpected attribute!"); 6136 } 6137 } 6138 } 6139 6140 /// See AbstractAttribute::getDeducedAttributes(...). 6141 void getDeducedAttributes(LLVMContext &Ctx, 6142 SmallVectorImpl<Attribute> &Attrs) const override { 6143 assert(Attrs.size() == 0); 6144 if (isAssumedReadNone()) { 6145 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6146 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 6147 if (isAssumedInaccessibleMemOnly()) 6148 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 6149 else if (isAssumedArgMemOnly()) 6150 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 6151 else if (isAssumedInaccessibleOrArgMemOnly()) 6152 Attrs.push_back( 6153 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 6154 } 6155 assert(Attrs.size() <= 1); 6156 } 6157 6158 /// See AbstractAttribute::manifest(...). 6159 ChangeStatus manifest(Attributor &A) override { 6160 const IRPosition &IRP = getIRPosition(); 6161 6162 // Check if we would improve the existing attributes first. 6163 SmallVector<Attribute, 4> DeducedAttrs; 6164 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6165 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6166 return IRP.hasAttr(Attr.getKindAsEnum(), 6167 /* IgnoreSubsumingPositions */ true); 6168 })) 6169 return ChangeStatus::UNCHANGED; 6170 6171 // Clear existing attributes. 6172 IRP.removeAttrs(AttrKinds); 6173 if (isAssumedReadNone()) 6174 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 6175 6176 // Use the generic manifest method. 6177 return IRAttribute::manifest(A); 6178 } 6179 6180 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
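  /// Note: \p RequestedMLK uses the inverse ("NO_*") encoding, so the loop
  /// below invokes \p Pred only for the location kinds whose bit is *cleared*
  /// in \p RequestedMLK; e.g., the call site attribute further down passes
  /// ALL_LOCATIONS to visit every recorded access of the callee.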
6181 bool checkForAllAccessesToMemoryKind( 6182 function_ref<bool(const Instruction *, const Value *, AccessKind, 6183 MemoryLocationsKind)> 6184 Pred, 6185 MemoryLocationsKind RequestedMLK) const override { 6186 if (!isValidState()) 6187 return false; 6188 6189 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 6190 if (AssumedMLK == NO_LOCATIONS) 6191 return true; 6192 6193 unsigned Idx = 0; 6194 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 6195 CurMLK *= 2, ++Idx) { 6196 if (CurMLK & RequestedMLK) 6197 continue; 6198 6199 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 6200 for (const AccessInfo &AI : *Accesses) 6201 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 6202 return false; 6203 } 6204 6205 return true; 6206 } 6207 6208 ChangeStatus indicatePessimisticFixpoint() override { 6209 // If we give up and indicate a pessimistic fixpoint this instruction will 6210 // become an access for all potential access kinds: 6211 // TODO: Add pointers for argmemonly and globals to improve the results of 6212 // checkForAllAccessesToMemoryKind. 6213 bool Changed = false; 6214 MemoryLocationsKind KnownMLK = getKnown(); 6215 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 6216 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 6217 if (!(CurMLK & KnownMLK)) 6218 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 6219 getAccessKindFromInst(I)); 6220 return AAMemoryLocation::indicatePessimisticFixpoint(); 6221 } 6222 6223 protected: 6224 /// Helper struct to tie together an instruction that has a read or write 6225 /// effect with the pointer it accesses (if any). 6226 struct AccessInfo { 6227 6228 /// The instruction that caused the access. 6229 const Instruction *I; 6230 6231 /// The base pointer that is accessed, or null if unknown. 6232 const Value *Ptr; 6233 6234 /// The kind of access (read/write/read+write). 6235 AccessKind Kind; 6236 6237 bool operator==(const AccessInfo &RHS) const { 6238 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 6239 } 6240 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 6241 if (LHS.I != RHS.I) 6242 return LHS.I < RHS.I; 6243 if (LHS.Ptr != RHS.Ptr) 6244 return LHS.Ptr < RHS.Ptr; 6245 if (LHS.Kind != RHS.Kind) 6246 return LHS.Kind < RHS.Kind; 6247 return false; 6248 } 6249 }; 6250 6251 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 6252 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 6253 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 6254 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 6255 6256 /// Return the kind(s) of location that may be accessed by \p V. 6257 AAMemoryLocation::MemoryLocationsKind 6258 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 6259 6260 /// Return the access kind as determined by \p I. 6261 AccessKind getAccessKindFromInst(const Instruction *I) { 6262 AccessKind AK = READ_WRITE; 6263 if (I) { 6264 AK = I->mayReadFromMemory() ? READ : NONE; 6265 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE)); 6266 } 6267 return AK; 6268 } 6269 6270 /// Update the state \p State and the AccessKind2Accesses given that \p I is 6271 /// an access of kind \p AK to a \p MLK memory location with the access 6272 /// pointer \p Ptr. 
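  /// E.g., a store through a pointer based on an alloca would be recorded
  /// here with \p MLK == NO_LOCAL_MEM and \p AK == WRITE, clearing
  /// NO_LOCAL_MEM from \p State and remembering the access instruction and
  /// pointer for later queries via checkForAllAccessesToMemoryKind.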
6273 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State, 6274 MemoryLocationsKind MLK, const Instruction *I, 6275 const Value *Ptr, bool &Changed, 6276 AccessKind AK = READ_WRITE) { 6277 6278 assert(isPowerOf2_32(MLK) && "Expected a single location set!"); 6279 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)]; 6280 if (!Accesses) 6281 Accesses = new (Allocator) AccessSet(); 6282 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second; 6283 State.removeAssumedBits(MLK); 6284 } 6285 6286 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or 6287 /// arguments, and update the state and access map accordingly. 6288 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr, 6289 AAMemoryLocation::StateType &State, bool &Changed); 6290 6291 /// Used to allocate access sets. 6292 BumpPtrAllocator &Allocator; 6293 6294 /// The set of IR attributes AAMemoryLocation deals with. 6295 static const Attribute::AttrKind AttrKinds[4]; 6296 }; 6297 6298 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = { 6299 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly, 6300 Attribute::InaccessibleMemOrArgMemOnly}; 6301 6302 void AAMemoryLocationImpl::categorizePtrValue( 6303 Attributor &A, const Instruction &I, const Value &Ptr, 6304 AAMemoryLocation::StateType &State, bool &Changed) { 6305 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for " 6306 << Ptr << " [" 6307 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n"); 6308 6309 auto StripGEPCB = [](Value *V) -> Value * { 6310 auto *GEP = dyn_cast<GEPOperator>(V); 6311 while (GEP) { 6312 V = GEP->getPointerOperand(); 6313 GEP = dyn_cast<GEPOperator>(V); 6314 } 6315 return V; 6316 }; 6317 6318 auto VisitValueCB = [&](Value &V, const Instruction *, 6319 AAMemoryLocation::StateType &T, 6320 bool Stripped) -> bool { 6321 MemoryLocationsKind MLK = NO_LOCATIONS; 6322 assert(!isa<GEPOperator>(V) && "GEPs should have been stripped."); 6323 if (isa<UndefValue>(V)) 6324 return true; 6325 if (auto *Arg = dyn_cast<Argument>(&V)) { 6326 if (Arg->hasByValAttr()) 6327 MLK = NO_LOCAL_MEM; 6328 else 6329 MLK = NO_ARGUMENT_MEM; 6330 } else if (auto *GV = dyn_cast<GlobalValue>(&V)) { 6331 if (GV->hasLocalLinkage()) 6332 MLK = NO_GLOBAL_INTERNAL_MEM; 6333 else 6334 MLK = NO_GLOBAL_EXTERNAL_MEM; 6335 } else if (isa<ConstantPointerNull>(V) && 6336 !NullPointerIsDefined(getAssociatedFunction(), 6337 V.getType()->getPointerAddressSpace())) { 6338 return true; 6339 } else if (isa<AllocaInst>(V)) { 6340 MLK = NO_LOCAL_MEM; 6341 } else if (const auto *CB = dyn_cast<CallBase>(&V)) { 6342 const auto &NoAliasAA = 6343 A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB)); 6344 if (NoAliasAA.isAssumedNoAlias()) 6345 MLK = NO_MALLOCED_MEM; 6346 else 6347 MLK = NO_UNKOWN_MEM; 6348 } else { 6349 MLK = NO_UNKOWN_MEM; 6350 } 6351 6352 assert(MLK != NO_LOCATIONS && "No location specified!"); 6353 updateStateAndAccessesMap(T, MLK, &I, &V, Changed, 6354 getAccessKindFromInst(&I)); 6355 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: " 6356 << V << " -> " << getMemoryLocationsAsStr(T.getAssumed()) 6357 << "\n"); 6358 return true; 6359 }; 6360 6361 if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>( 6362 A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(), 6363 /* UseValueSimplify */ true, 6364 /* MaxValues */ 32, StripGEPCB)) { 6365 LLVM_DEBUG( 6366 dbgs() << "[AAMemoryLocation] Pointer locations not 
categorized\n"); 6367 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed, 6368 getAccessKindFromInst(&I)); 6369 } else { 6370 LLVM_DEBUG( 6371 dbgs() 6372 << "[AAMemoryLocation] Accessed locations with pointer locations: " 6373 << getMemoryLocationsAsStr(State.getAssumed()) << "\n"); 6374 } 6375 } 6376 6377 AAMemoryLocation::MemoryLocationsKind 6378 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I, 6379 bool &Changed) { 6380 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for " 6381 << I << "\n"); 6382 6383 AAMemoryLocation::StateType AccessedLocs; 6384 AccessedLocs.intersectAssumedBits(NO_LOCATIONS); 6385 6386 if (auto *CB = dyn_cast<CallBase>(&I)) { 6387 6388 // First check if we assume any memory is access is visible. 6389 const auto &CBMemLocationAA = 6390 A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB)); 6391 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I 6392 << " [" << CBMemLocationAA << "]\n"); 6393 6394 if (CBMemLocationAA.isAssumedReadNone()) 6395 return NO_LOCATIONS; 6396 6397 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) { 6398 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr, 6399 Changed, getAccessKindFromInst(&I)); 6400 return AccessedLocs.getAssumed(); 6401 } 6402 6403 uint32_t CBAssumedNotAccessedLocs = 6404 CBMemLocationAA.getAssumedNotAccessedLocation(); 6405 6406 // Set the argmemonly and global bit as we handle them separately below. 6407 uint32_t CBAssumedNotAccessedLocsNoArgMem = 6408 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 6409 6410 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 6411 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 6412 continue; 6413 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 6414 getAccessKindFromInst(&I)); 6415 } 6416 6417 // Now handle global memory if it might be accessed. This is slightly tricky 6418 // as NO_GLOBAL_MEM has multiple bits set. 6419 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 6420 if (HasGlobalAccesses) { 6421 auto AccessPred = [&](const Instruction *, const Value *Ptr, 6422 AccessKind Kind, MemoryLocationsKind MLK) { 6423 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 6424 getAccessKindFromInst(&I)); 6425 return true; 6426 }; 6427 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 6428 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 6429 return AccessedLocs.getWorstState(); 6430 } 6431 6432 LLVM_DEBUG( 6433 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 6434 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6435 6436 // Now handle argument memory if it might be accessed. 6437 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 6438 if (HasArgAccesses) { 6439 for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E; 6440 ++ArgNo) { 6441 6442 // Skip non-pointer arguments. 6443 const Value *ArgOp = CB->getArgOperand(ArgNo); 6444 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 6445 continue; 6446 6447 // Skip readnone arguments. 
6448 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo); 6449 const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>( 6450 *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6451 6452 if (ArgOpMemLocationAA.isAssumedReadNone()) 6453 continue; 6454 6455 // Categorize potentially accessed pointer arguments as if there was an 6456 // access instruction with them as pointer. 6457 categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed); 6458 } 6459 } 6460 6461 LLVM_DEBUG( 6462 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 6463 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6464 6465 return AccessedLocs.getAssumed(); 6466 } 6467 6468 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 6469 LLVM_DEBUG( 6470 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 6471 << I << " [" << *Ptr << "]\n"); 6472 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 6473 return AccessedLocs.getAssumed(); 6474 } 6475 6476 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 6477 << I << "\n"); 6478 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 6479 getAccessKindFromInst(&I)); 6480 return AccessedLocs.getAssumed(); 6481 } 6482 6483 /// An AA to represent the memory behavior function attributes. 6484 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 6485 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 6486 : AAMemoryLocationImpl(IRP, A) {} 6487 6488 /// See AbstractAttribute::updateImpl(Attributor &A). 6489 virtual ChangeStatus updateImpl(Attributor &A) override { 6490 6491 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6492 *this, getIRPosition(), /* TrackDependence */ false); 6493 if (MemBehaviorAA.isAssumedReadNone()) { 6494 if (MemBehaviorAA.isKnownReadNone()) 6495 return indicateOptimisticFixpoint(); 6496 assert(isAssumedReadNone() && 6497 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 6498 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 6499 return ChangeStatus::UNCHANGED; 6500 } 6501 6502 // The current assumed state used to determine a change. 6503 auto AssumedState = getAssumed(); 6504 bool Changed = false; 6505 6506 auto CheckRWInst = [&](Instruction &I) { 6507 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed); 6508 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I 6509 << ": " << getMemoryLocationsAsStr(MLK) << "\n"); 6510 removeAssumedBits(inverseLocation(MLK, false, false)); 6511 return true; 6512 }; 6513 6514 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 6515 return indicatePessimisticFixpoint(); 6516 6517 Changed |= AssumedState != getAssumed(); 6518 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6519 } 6520 6521 /// See AbstractAttribute::trackStatistics() 6522 void trackStatistics() const override { 6523 if (isAssumedReadNone()) 6524 STATS_DECLTRACK_FN_ATTR(readnone) 6525 else if (isAssumedArgMemOnly()) 6526 STATS_DECLTRACK_FN_ATTR(argmemonly) 6527 else if (isAssumedInaccessibleMemOnly()) 6528 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly) 6529 else if (isAssumedInaccessibleOrArgMemOnly()) 6530 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly) 6531 } 6532 }; 6533 6534 /// AAMemoryLocation attribute for call sites. 
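/// A call site currently just mirrors the callee: updateImpl below replays
/// every access recorded for the callee function into the call site state via
/// checkForAllAccessesToMemoryKind.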
6535 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl { 6536 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A) 6537 : AAMemoryLocationImpl(IRP, A) {} 6538 6539 /// See AbstractAttribute::initialize(...). 6540 void initialize(Attributor &A) override { 6541 AAMemoryLocationImpl::initialize(A); 6542 Function *F = getAssociatedFunction(); 6543 if (!F || !A.isFunctionIPOAmendable(*F)) { 6544 indicatePessimisticFixpoint(); 6545 return; 6546 } 6547 } 6548 6549 /// See AbstractAttribute::updateImpl(...). 6550 ChangeStatus updateImpl(Attributor &A) override { 6551 // TODO: Once we have call site specific value information we can provide 6552 // call site specific liveness liveness information and then it makes 6553 // sense to specialize attributes for call sites arguments instead of 6554 // redirecting requests to the callee argument. 6555 Function *F = getAssociatedFunction(); 6556 const IRPosition &FnPos = IRPosition::function(*F); 6557 auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos); 6558 bool Changed = false; 6559 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 6560 AccessKind Kind, MemoryLocationsKind MLK) { 6561 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed, 6562 getAccessKindFromInst(I)); 6563 return true; 6564 }; 6565 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS)) 6566 return indicatePessimisticFixpoint(); 6567 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6568 } 6569 6570 /// See AbstractAttribute::trackStatistics() 6571 void trackStatistics() const override { 6572 if (isAssumedReadNone()) 6573 STATS_DECLTRACK_CS_ATTR(readnone) 6574 } 6575 }; 6576 6577 /// ------------------ Value Constant Range Attribute ------------------------- 6578 6579 struct AAValueConstantRangeImpl : AAValueConstantRange { 6580 using StateType = IntegerRangeState; 6581 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A) 6582 : AAValueConstantRange(IRP, A) {} 6583 6584 /// See AbstractAttribute::getAsStr(). 6585 const std::string getAsStr() const override { 6586 std::string Str; 6587 llvm::raw_string_ostream OS(Str); 6588 OS << "range(" << getBitWidth() << ")<"; 6589 getKnown().print(OS); 6590 OS << " / "; 6591 getAssumed().print(OS); 6592 OS << ">"; 6593 return OS.str(); 6594 } 6595 6596 /// Helper function to get a SCEV expr for the associated value at program 6597 /// point \p I. 6598 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const { 6599 if (!getAnchorScope()) 6600 return nullptr; 6601 6602 ScalarEvolution *SE = 6603 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6604 *getAnchorScope()); 6605 6606 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>( 6607 *getAnchorScope()); 6608 6609 if (!SE || !LI) 6610 return nullptr; 6611 6612 const SCEV *S = SE->getSCEV(&getAssociatedValue()); 6613 if (!I) 6614 return S; 6615 6616 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent())); 6617 } 6618 6619 /// Helper function to get a range from SCEV for the associated value at 6620 /// program point \p I. 
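  /// The range is the unsigned range SCEV computes for the expression; e.g.,
  /// for the induction variable of a canonical counted loop
  /// `for (i = 0; i < 8; ++i)` (hypothetical) one would typically obtain
  /// [0, 8).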
6621 ConstantRange getConstantRangeFromSCEV(Attributor &A, 6622 const Instruction *I = nullptr) const { 6623 if (!getAnchorScope()) 6624 return getWorstState(getBitWidth()); 6625 6626 ScalarEvolution *SE = 6627 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6628 *getAnchorScope()); 6629 6630 const SCEV *S = getSCEV(A, I); 6631 if (!SE || !S) 6632 return getWorstState(getBitWidth()); 6633 6634 return SE->getUnsignedRange(S); 6635 } 6636 6637 /// Helper function to get a range from LVI for the associated value at 6638 /// program point \p I. 6639 ConstantRange 6640 getConstantRangeFromLVI(Attributor &A, 6641 const Instruction *CtxI = nullptr) const { 6642 if (!getAnchorScope()) 6643 return getWorstState(getBitWidth()); 6644 6645 LazyValueInfo *LVI = 6646 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 6647 *getAnchorScope()); 6648 6649 if (!LVI || !CtxI) 6650 return getWorstState(getBitWidth()); 6651 return LVI->getConstantRange(&getAssociatedValue(), 6652 const_cast<BasicBlock *>(CtxI->getParent()), 6653 const_cast<Instruction *>(CtxI)); 6654 } 6655 6656 /// See AAValueConstantRange::getKnownConstantRange(..). 6657 ConstantRange 6658 getKnownConstantRange(Attributor &A, 6659 const Instruction *CtxI = nullptr) const override { 6660 if (!CtxI || CtxI == getCtxI()) 6661 return getKnown(); 6662 6663 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6664 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6665 return getKnown().intersectWith(SCEVR).intersectWith(LVIR); 6666 } 6667 6668 /// See AAValueConstantRange::getAssumedConstantRange(..). 6669 ConstantRange 6670 getAssumedConstantRange(Attributor &A, 6671 const Instruction *CtxI = nullptr) const override { 6672 // TODO: Make SCEV use Attributor assumption. 6673 // We may be able to bound a variable range via assumptions in 6674 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to 6675 // evolve to x^2 + x, then we can say that y is in [2, 12]. 6676 6677 if (!CtxI || CtxI == getCtxI()) 6678 return getAssumed(); 6679 6680 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6681 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6682 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR); 6683 } 6684 6685 /// See AbstractAttribute::initialize(..). 6686 void initialize(Attributor &A) override { 6687 // Intersect a range given by SCEV. 6688 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 6689 6690 // Intersect a range given by LVI. 6691 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 6692 } 6693 6694 /// Helper function to create MDNode for range metadata. 6695 static MDNode * 6696 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx, 6697 const ConstantRange &AssumedConstantRange) { 6698 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get( 6699 Ty, AssumedConstantRange.getLower())), 6700 ConstantAsMetadata::get(ConstantInt::get( 6701 Ty, AssumedConstantRange.getUpper()))}; 6702 return MDNode::get(Ctx, LowAndHigh); 6703 } 6704 6705 /// Return true if \p Assumed is included in \p KnownRanges. 6706 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) { 6707 6708 if (Assumed.isFullSet()) 6709 return false; 6710 6711 if (!KnownRanges) 6712 return true; 6713 6714 // If multiple ranges are annotated in IR, we give up to annotate assumed 6715 // range for now. 6716 6717 // TODO: If there exists a known range which containts assumed range, we 6718 // can say assumed range is better. 
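    // E.g., !range !{i64 0, i64 10} encodes a single [Lo, Hi) pair (two
    // operands) which we can compare against, whereas something like
    // !range !{i64 0, i64 2, i64 8, i64 10} encodes multiple disjoint ranges
    // and is conservatively treated as not improvable below.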
6719 if (KnownRanges->getNumOperands() > 2) 6720 return false; 6721 6722 ConstantInt *Lower = 6723 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0)); 6724 ConstantInt *Upper = 6725 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1)); 6726 6727 ConstantRange Known(Lower->getValue(), Upper->getValue()); 6728 return Known.contains(Assumed) && Known != Assumed; 6729 } 6730 6731 /// Helper function to set range metadata. 6732 static bool 6733 setRangeMetadataIfisBetterRange(Instruction *I, 6734 const ConstantRange &AssumedConstantRange) { 6735 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 6736 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 6737 if (!AssumedConstantRange.isEmptySet()) { 6738 I->setMetadata(LLVMContext::MD_range, 6739 getMDNodeForConstantRange(I->getType(), I->getContext(), 6740 AssumedConstantRange)); 6741 return true; 6742 } 6743 } 6744 return false; 6745 } 6746 6747 /// See AbstractAttribute::manifest() 6748 ChangeStatus manifest(Attributor &A) override { 6749 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6750 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 6751 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 6752 6753 auto &V = getAssociatedValue(); 6754 if (!AssumedConstantRange.isEmptySet() && 6755 !AssumedConstantRange.isSingleElement()) { 6756 if (Instruction *I = dyn_cast<Instruction>(&V)) 6757 if (isa<CallInst>(I) || isa<LoadInst>(I)) 6758 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 6759 Changed = ChangeStatus::CHANGED; 6760 } 6761 6762 return Changed; 6763 } 6764 }; 6765 6766 struct AAValueConstantRangeArgument final 6767 : AAArgumentFromCallSiteArguments< 6768 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> { 6769 using Base = AAArgumentFromCallSiteArguments< 6770 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>; 6771 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 6772 : Base(IRP, A) {} 6773 6774 /// See AbstractAttribute::initialize(..). 6775 void initialize(Attributor &A) override { 6776 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 6777 indicatePessimisticFixpoint(); 6778 } else { 6779 Base::initialize(A); 6780 } 6781 } 6782 6783 /// See AbstractAttribute::trackStatistics() 6784 void trackStatistics() const override { 6785 STATS_DECLTRACK_ARG_ATTR(value_range) 6786 } 6787 }; 6788 6789 struct AAValueConstantRangeReturned 6790 : AAReturnedFromReturnedValues<AAValueConstantRange, 6791 AAValueConstantRangeImpl> { 6792 using Base = AAReturnedFromReturnedValues<AAValueConstantRange, 6793 AAValueConstantRangeImpl>; 6794 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 6795 : Base(IRP, A) {} 6796 6797 /// See AbstractAttribute::initialize(...). 6798 void initialize(Attributor &A) override {} 6799 6800 /// See AbstractAttribute::trackStatistics() 6801 void trackStatistics() const override { 6802 STATS_DECLTRACK_FNRET_ATTR(value_range) 6803 } 6804 }; 6805 6806 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 6807 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 6808 : AAValueConstantRangeImpl(IRP, A) {} 6809 6810 /// See AbstractAttribute::initialize(...). 
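  /// E.g., a `ConstantInt` i32 5 is collapsed to the single-element range
  /// [5, 6) and reaches an optimistic fixpoint right away, while an `undef`
  /// is collapsed to 0.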
6811 void initialize(Attributor &A) override { 6812 AAValueConstantRangeImpl::initialize(A); 6813 Value &V = getAssociatedValue(); 6814 6815 if (auto *C = dyn_cast<ConstantInt>(&V)) { 6816 unionAssumed(ConstantRange(C->getValue())); 6817 indicateOptimisticFixpoint(); 6818 return; 6819 } 6820 6821 if (isa<UndefValue>(&V)) { 6822 // Collapse the undef state to 0. 6823 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 6824 indicateOptimisticFixpoint(); 6825 return; 6826 } 6827 6828 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 6829 return; 6830 // If it is a load instruction with range metadata, use it. 6831 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 6832 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 6833 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 6834 return; 6835 } 6836 6837 // We can work with PHI and select instruction as we traverse their operands 6838 // during update. 6839 if (isa<SelectInst>(V) || isa<PHINode>(V)) 6840 return; 6841 6842 // Otherwise we give up. 6843 indicatePessimisticFixpoint(); 6844 6845 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 6846 << getAssociatedValue() << "\n"); 6847 } 6848 6849 bool calculateBinaryOperator( 6850 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 6851 const Instruction *CtxI, 6852 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6853 Value *LHS = BinOp->getOperand(0); 6854 Value *RHS = BinOp->getOperand(1); 6855 // TODO: Allow non integers as well. 6856 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 6857 return false; 6858 6859 auto &LHSAA = 6860 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 6861 QuerriedAAs.push_back(&LHSAA); 6862 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 6863 6864 auto &RHSAA = 6865 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 6866 QuerriedAAs.push_back(&RHSAA); 6867 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 6868 6869 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 6870 6871 T.unionAssumed(AssumedRange); 6872 6873 // TODO: Track a known state too. 6874 6875 return T.isValidState(); 6876 } 6877 6878 bool calculateCastInst( 6879 Attributor &A, CastInst *CastI, IntegerRangeState &T, 6880 const Instruction *CtxI, 6881 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6882 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 6883 // TODO: Allow non integers as well. 6884 Value &OpV = *CastI->getOperand(0); 6885 if (!OpV.getType()->isIntegerTy()) 6886 return false; 6887 6888 auto &OpAA = 6889 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV)); 6890 QuerriedAAs.push_back(&OpAA); 6891 T.unionAssumed( 6892 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 6893 return T.isValidState(); 6894 } 6895 6896 bool 6897 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 6898 const Instruction *CtxI, 6899 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6900 Value *LHS = CmpI->getOperand(0); 6901 Value *RHS = CmpI->getOperand(1); 6902 // TODO: Allow non integers as well. 
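    // Example (hypothetical) of the region based reasoning below: for
    //   %c = icmp ult i32 %lhs, %rhs
    // with %lhs assumed in [4, 10) and %rhs assumed in [0, 4), no value of
    // %lhs can be ult any value of %rhs, so the allowed region intersected
    // with the LHS range is empty and we may assume %c is false.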
6903 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6904 return false;
6905
6906 auto &LHSAA =
6907 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6908 QuerriedAAs.push_back(&LHSAA);
6909 auto &RHSAA =
6910 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6911 QuerriedAAs.push_back(&RHSAA);
6912
6913 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6914 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6915
6916 // If one of them is the empty set, we cannot decide.
6917 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6918 return true;
6919
6920 bool MustTrue = false, MustFalse = false;
6921
6922 auto AllowedRegion =
6923 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6924
6925 auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6926 CmpI->getPredicate(), RHSAARange);
6927
6928 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6929 MustFalse = true;
6930
6931 if (SatisfyingRegion.contains(LHSAARange))
6932 MustTrue = true;
6933
6934 assert((!MustTrue || !MustFalse) &&
6935 "Either MustTrue or MustFalse should be false!");
6936
6937 if (MustTrue)
6938 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6939 else if (MustFalse)
6940 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6941 else
6942 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6943
6944 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6945 << " " << RHSAA << "\n");
6946
6947 // TODO: Track a known state too.
6948 return T.isValidState();
6949 }
6950
6951 /// See AbstractAttribute::updateImpl(...).
6952 ChangeStatus updateImpl(Attributor &A) override {
6953 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6954 IntegerRangeState &T, bool Stripped) -> bool {
6955 Instruction *I = dyn_cast<Instruction>(&V);
6956 if (!I || isa<CallBase>(I)) {
6957
6958 // If the value is not an instruction, we query the AA from the Attributor.
6959 const auto &AA =
6960 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6961
6962 // We do not use the clamp operator so the program point CtxI can be used.
6963 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6964
6965 return T.isValidState();
6966 }
6967
6968 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6969 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6970 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6971 return false;
6972 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6973 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6974 return false;
6975 } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6976 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6977 return false;
6978 } else {
6979 // Give up with other instructions.
6980 // TODO: Add other instructions.
6981
6982 T.indicatePessimisticFixpoint();
6983 return false;
6984 }
6985
6986 // Catch circular reasoning in a pessimistic way for now.
6987 // TODO: Check how the range evolves and if we stripped anything, see also
6988 // AADereferenceable or AAAlign for similar situations.
6989 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6990 if (QueriedAA != this)
6991 continue;
6992 // If we are in a steady state we do not need to worry.
6993 if (T.getAssumed() == getState().getAssumed())
6994 continue;
6995 T.indicatePessimisticFixpoint();
6996 }
6997
6998 return T.isValidState();
6999 };
7000
7001 IntegerRangeState T(getBitWidth());
7002
7003 if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7004 A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7005 /* UseValueSimplify */ false))
7006 return indicatePessimisticFixpoint();
7007
7008 return clampStateAndIndicateChange(getState(), T);
7009 }
7010
7011 /// See AbstractAttribute::trackStatistics()
7012 void trackStatistics() const override {
7013 STATS_DECLTRACK_FLOATING_ATTR(value_range)
7014 }
7015 };
7016
7017 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7018 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7019 : AAValueConstantRangeImpl(IRP, A) {}
7020
7021 /// See AbstractAttribute::updateImpl(...).
7022 ChangeStatus updateImpl(Attributor &A) override {
7023 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7024 "not be called");
7025 }
7026
7027 /// See AbstractAttribute::trackStatistics()
7028 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7029 };
7030
7031 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7032 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7033 : AAValueConstantRangeFunction(IRP, A) {}
7034
7035 /// See AbstractAttribute::trackStatistics()
7036 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7037 };
7038
7039 struct AAValueConstantRangeCallSiteReturned
7040 : AACallSiteReturnedFromReturned<AAValueConstantRange,
7041 AAValueConstantRangeImpl> {
7042 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7043 : AACallSiteReturnedFromReturned<AAValueConstantRange,
7044 AAValueConstantRangeImpl>(IRP, A) {}
7045
7046 /// See AbstractAttribute::initialize(...).
7047 void initialize(Attributor &A) override {
7048 // If it is a call instruction with range metadata, use the metadata.
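    // E.g., a call '%r = call i32 @foo()' (hypothetical) carrying
    // !range !{i32 0, i32 1024} lets us seed the known range with [0, 1024).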
7049 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 7050 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 7051 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 7052 7053 AAValueConstantRangeImpl::initialize(A); 7054 } 7055 7056 /// See AbstractAttribute::trackStatistics() 7057 void trackStatistics() const override { 7058 STATS_DECLTRACK_CSRET_ATTR(value_range) 7059 } 7060 }; 7061 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 7062 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 7063 : AAValueConstantRangeFloating(IRP, A) {} 7064 7065 /// See AbstractAttribute::trackStatistics() 7066 void trackStatistics() const override { 7067 STATS_DECLTRACK_CSARG_ATTR(value_range) 7068 } 7069 }; 7070 } // namespace 7071 7072 const char AAReturnedValues::ID = 0; 7073 const char AANoUnwind::ID = 0; 7074 const char AANoSync::ID = 0; 7075 const char AANoFree::ID = 0; 7076 const char AANonNull::ID = 0; 7077 const char AANoRecurse::ID = 0; 7078 const char AAWillReturn::ID = 0; 7079 const char AAUndefinedBehavior::ID = 0; 7080 const char AANoAlias::ID = 0; 7081 const char AAReachability::ID = 0; 7082 const char AANoReturn::ID = 0; 7083 const char AAIsDead::ID = 0; 7084 const char AADereferenceable::ID = 0; 7085 const char AAAlign::ID = 0; 7086 const char AANoCapture::ID = 0; 7087 const char AAValueSimplify::ID = 0; 7088 const char AAHeapToStack::ID = 0; 7089 const char AAPrivatizablePtr::ID = 0; 7090 const char AAMemoryBehavior::ID = 0; 7091 const char AAMemoryLocation::ID = 0; 7092 const char AAValueConstantRange::ID = 0; 7093 7094 // Macro magic to create the static generator function for attributes that 7095 // follow the naming scheme. 7096 7097 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ 7098 case IRPosition::PK: \ 7099 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); 7100 7101 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ 7102 case IRPosition::PK: \ 7103 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \ 7104 ++NumAAs; \ 7105 break; 7106 7107 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7108 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7109 CLASS *AA = nullptr; \ 7110 switch (IRP.getPositionKind()) { \ 7111 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7112 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7113 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7114 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7115 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7116 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7117 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7118 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7119 } \ 7120 return *AA; \ 7121 } 7122 7123 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7124 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7125 CLASS *AA = nullptr; \ 7126 switch (IRP.getPositionKind()) { \ 7127 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7128 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ 7129 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7130 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7131 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7132 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7133 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7134 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7135 } \ 7136 return *AA; \ 7137 
} 7138 7139 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7140 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7141 CLASS *AA = nullptr; \ 7142 switch (IRP.getPositionKind()) { \ 7143 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7144 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7145 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7146 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7147 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7148 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7149 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7150 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7151 } \ 7152 return *AA; \ 7153 } 7154 7155 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7156 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7157 CLASS *AA = nullptr; \ 7158 switch (IRP.getPositionKind()) { \ 7159 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7160 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7161 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7162 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7163 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7164 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7165 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7166 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7167 } \ 7168 return *AA; \ 7169 } 7170 7171 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7172 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7173 CLASS *AA = nullptr; \ 7174 switch (IRP.getPositionKind()) { \ 7175 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7176 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7177 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7178 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7179 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7180 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7181 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7182 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7183 } \ 7184 return *AA; \ 7185 } 7186 7187 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) 7188 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) 7189 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) 7190 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) 7191 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) 7192 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues) 7193 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation) 7194 7195 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) 7196 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) 7197 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr) 7198 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) 7199 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) 7200 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture) 7201 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange) 7202 7203 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) 7204 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) 7205 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) 7206 7207 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack) 7208 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability) 7209 
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior) 7210 7211 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior) 7212 7213 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION 7214 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION 7215 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION 7216 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION 7217 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION 7218 #undef SWITCH_PK_CREATE 7219 #undef SWITCH_PK_INV 7220
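// For illustration, a use of the macro machinery above such as
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) expands to
// roughly:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     ... // the other invalid position kinds
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }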