//===- AttributorAttributes.cpp - Attributes for Attributor deduction ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
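//
// As a minimal sketch of the separate form (the attribute name "myattr" and
// the increment conditions below are illustrative only, not deduced in this
// file):
//  void trackStatistics() const override {
//    STATS_DECL(myattr, Function, BUILD_STAT_MSG_IR_ATTR(functions, myattr))
//    if (FirstIncrementCondition)
//      STATS_TRACK(myattr, Function)
//    if (SecondIncrementCondition)
//      STATS_TRACK(myattr, Function)
//  }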
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                 \
    return OS << static_cast<const AbstractAttribute &>(AA);                  \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to
/// build getelementptr instructions that traverse the natural type of \p Ptr
/// if possible. If that fails, the remaining offset is adjusted byte-wise,
/// hence through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point.
/// This will be done by looking through cast instructions, selects, phis, and
/// calls with the "returned" attribute. Once we cannot look through the value
/// any further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         /* TrackDependence */ UseAssumed);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in an
/// update is required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or "
         "call site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into its own state. Let ParentState_i be a
  // state to indicate the known information for an i-th branch instruction in
  // the context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a)
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values ------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ?
                            std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV,
                               SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV
                      << " by #" << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible,
    // thus if all return values can be represented in the current scope, do
    // so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
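    // The cached count below lets us skip callees whose set of returned
    // values has not grown since we processed them last.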
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is volatile (memcpy,
  /// memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
1567 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 1568 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 1569 : AANoFreeFloating(IRP, A) {} 1570 1571 ChangeStatus manifest(Attributor &A) override { 1572 return ChangeStatus::UNCHANGED; 1573 } 1574 /// See AbstractAttribute::trackStatistics() 1575 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 1576 }; 1577 1578 /// ------------------------ NonNull Argument Attribute ------------------------ 1579 static int64_t getKnownNonNullAndDerefBytesForUse( 1580 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 1581 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 1582 TrackUse = false; 1583 1584 const Value *UseV = U->get(); 1585 if (!UseV->getType()->isPointerTy()) 1586 return 0; 1587 1588 Type *PtrTy = UseV->getType(); 1589 const Function *F = I->getFunction(); 1590 bool NullPointerIsDefined = 1591 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 1592 const DataLayout &DL = A.getInfoCache().getDL(); 1593 if (const auto *CB = dyn_cast<CallBase>(I)) { 1594 if (CB->isBundleOperand(U)) { 1595 if (RetainedKnowledge RK = getKnowledgeFromUse( 1596 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 1597 IsNonNull |= 1598 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 1599 return RK.ArgValue; 1600 } 1601 return 0; 1602 } 1603 1604 if (CB->isCallee(U)) { 1605 IsNonNull |= !NullPointerIsDefined; 1606 return 0; 1607 } 1608 1609 unsigned ArgNo = CB->getArgOperandNo(U); 1610 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 1611 // As long as we only use known information there is no need to track 1612 // dependences here. 1613 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, 1614 /* TrackDependence */ false); 1615 IsNonNull |= DerefAA.isKnownNonNull(); 1616 return DerefAA.getKnownDereferenceableBytes(); 1617 } 1618 1619 // We need to follow common pointer manipulation uses to the accesses they 1620 // feed into. We can try to be smart to avoid looking through things we do not 1621 // like for now, e.g., non-inbounds GEPs. 1622 if (isa<CastInst>(I)) { 1623 TrackUse = true; 1624 return 0; 1625 } 1626 1627 if (isa<GetElementPtrInst>(I)) { 1628 TrackUse = true; 1629 return 0; 1630 } 1631 1632 int64_t Offset; 1633 const Value *Base = 1634 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); 1635 if (Base) { 1636 if (Base == &AssociatedValue && 1637 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1638 int64_t DerefBytes = 1639 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; 1640 1641 IsNonNull |= !NullPointerIsDefined; 1642 return std::max(int64_t(0), DerefBytes); 1643 } 1644 } 1645 1646 /// Corner case when an offset is 0. 
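/// For example (an illustrative sketch, not a real test case): a direct
/// access such as
///   load i32, i32* %p
/// where %p is the associated value lands here with Offset == 0; the access
/// then implies dereferenceability of the loaded type's store size and, if
/// null is not a defined address in this address space, nonnull as well.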
1647 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, 1648 /*AllowNonInbounds*/ true); 1649 if (Base) { 1650 if (Offset == 0 && Base == &AssociatedValue && 1651 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1652 int64_t DerefBytes = 1653 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); 1654 IsNonNull |= !NullPointerIsDefined; 1655 return std::max(int64_t(0), DerefBytes); 1656 } 1657 } 1658 1659 return 0; 1660 } 1661 1662 struct AANonNullImpl : AANonNull { 1663 AANonNullImpl(const IRPosition &IRP, Attributor &A) 1664 : AANonNull(IRP, A), 1665 NullIsDefined(NullPointerIsDefined( 1666 getAnchorScope(), 1667 getAssociatedValue().getType()->getPointerAddressSpace())) {} 1668 1669 /// See AbstractAttribute::initialize(...). 1670 void initialize(Attributor &A) override { 1671 Value &V = getAssociatedValue(); 1672 if (!NullIsDefined && 1673 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 1674 /* IgnoreSubsumingPositions */ false, &A)) 1675 indicateOptimisticFixpoint(); 1676 else if (isa<ConstantPointerNull>(V)) 1677 indicatePessimisticFixpoint(); 1678 else 1679 AANonNull::initialize(A); 1680 1681 bool CanBeNull = true; 1682 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) 1683 if (!CanBeNull) 1684 indicateOptimisticFixpoint(); 1685 1686 if (!getState().isAtFixpoint()) 1687 if (Instruction *CtxI = getCtxI()) 1688 followUsesInMBEC(*this, A, getState(), *CtxI); 1689 } 1690 1691 /// See followUsesInMBEC 1692 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 1693 AANonNull::StateType &State) { 1694 bool IsNonNull = false; 1695 bool TrackUse = false; 1696 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 1697 IsNonNull, TrackUse); 1698 State.setKnown(IsNonNull); 1699 return TrackUse; 1700 } 1701 1702 /// See AbstractAttribute::getAsStr(). 1703 const std::string getAsStr() const override { 1704 return getAssumed() ? "nonnull" : "may-null"; 1705 } 1706 1707 /// Flag to determine if the underlying value can be null and still allow 1708 /// valid accesses. 1709 const bool NullIsDefined; 1710 }; 1711 1712 /// NonNull attribute for a floating value. 1713 struct AANonNullFloating : public AANonNullImpl { 1714 AANonNullFloating(const IRPosition &IRP, Attributor &A) 1715 : AANonNullImpl(IRP, A) {} 1716 1717 /// See AbstractAttribute::updateImpl(...). 1718 ChangeStatus updateImpl(Attributor &A) override { 1719 if (!NullIsDefined) { 1720 const auto &DerefAA = 1721 A.getAAFor<AADereferenceable>(*this, getIRPosition()); 1722 if (DerefAA.getAssumedDereferenceableBytes()) 1723 return ChangeStatus::UNCHANGED; 1724 } 1725 1726 const DataLayout &DL = A.getDataLayout(); 1727 1728 DominatorTree *DT = nullptr; 1729 AssumptionCache *AC = nullptr; 1730 InformationCache &InfoCache = A.getInfoCache(); 1731 if (const Function *Fn = getAnchorScope()) { 1732 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 1733 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 1734 } 1735 1736 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 1737 AANonNull::StateType &T, bool Stripped) -> bool { 1738 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); 1739 if (!Stripped && this == &AA) { 1740 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 1741 T.indicatePessimisticFixpoint(); 1742 } else { 1743 // Use abstract attribute information. 
1744 const AANonNull::StateType &NS = 1745 static_cast<const AANonNull::StateType &>(AA.getState()); 1746 T ^= NS; 1747 } 1748 return T.isValidState(); 1749 }; 1750 1751 StateType T; 1752 if (!genericValueTraversal<AANonNull, StateType>( 1753 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 1754 return indicatePessimisticFixpoint(); 1755 1756 return clampStateAndIndicateChange(getState(), T); 1757 } 1758 1759 /// See AbstractAttribute::trackStatistics() 1760 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1761 }; 1762 1763 /// NonNull attribute for function return value. 1764 struct AANonNullReturned final 1765 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> { 1766 AANonNullReturned(const IRPosition &IRP, Attributor &A) 1767 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {} 1768 1769 /// See AbstractAttribute::trackStatistics() 1770 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1771 }; 1772 1773 /// NonNull attribute for function argument. 1774 struct AANonNullArgument final 1775 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 1776 AANonNullArgument(const IRPosition &IRP, Attributor &A) 1777 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 1778 1779 /// See AbstractAttribute::trackStatistics() 1780 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 1781 }; 1782 1783 struct AANonNullCallSiteArgument final : AANonNullFloating { 1784 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 1785 : AANonNullFloating(IRP, A) {} 1786 1787 /// See AbstractAttribute::trackStatistics() 1788 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 1789 }; 1790 1791 /// NonNull attribute for a call site return position. 1792 struct AANonNullCallSiteReturned final 1793 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 1794 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 1795 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 1796 1797 /// See AbstractAttribute::trackStatistics() 1798 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 1799 }; 1800 1801 /// ------------------------ No-Recurse Attributes ---------------------------- 1802 1803 struct AANoRecurseImpl : public AANoRecurse { 1804 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 1805 1806 /// See AbstractAttribute::getAsStr() 1807 const std::string getAsStr() const override { 1808 return getAssumed() ? "norecurse" : "may-recurse"; 1809 } 1810 }; 1811 1812 struct AANoRecurseFunction final : AANoRecurseImpl { 1813 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 1814 : AANoRecurseImpl(IRP, A) {} 1815 1816 /// See AbstractAttribute::initialize(...). 1817 void initialize(Attributor &A) override { 1818 AANoRecurseImpl::initialize(A); 1819 if (const Function *F = getAnchorScope()) 1820 if (A.getInfoCache().getSccSize(*F) != 1) 1821 indicatePessimisticFixpoint(); 1822 } 1823 1824 /// See AbstractAttribute::updateImpl(...). 1825 ChangeStatus updateImpl(Attributor &A) override { 1826 1827 // If all live call sites are known to be no-recurse, we are as well. 
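// Sketch of the argument: if this function were reachable from itself, some
// caller of it would lie on that cycle and therefore be recursive itself.
// Hence, when every call site is known and resides in a known norecurse
// caller, this function cannot be part of any recursion. Only known
// information is used here, so no dependences need to be tracked.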
1828 auto CallSitePred = [&](AbstractCallSite ACS) {
1829 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1830 *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1831 /* TrackDependence */ false, DepClassTy::OPTIONAL);
1832 return NoRecurseAA.isKnownNoRecurse();
1833 };
1834 bool AllCallSitesKnown;
1835 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1836 // If we know all call sites and all are known no-recurse, we are done.
1837 // If all known call sites, which might not be all that exist, are known
1838 // to be no-recurse, we are not done but we can continue to assume
1839 // no-recurse. If one of the call sites we have not visited becomes
1840 // live, another update is triggered.
1841 if (AllCallSitesKnown)
1842 indicateOptimisticFixpoint();
1843 return ChangeStatus::UNCHANGED;
1844 }
1845 
1846 // If the above check does not hold anymore we look at the calls.
1847 auto CheckForNoRecurse = [&](Instruction &I) {
1848 const auto &CB = cast<CallBase>(I);
1849 if (CB.hasFnAttr(Attribute::NoRecurse))
1850 return true;
1851 
1852 const auto &NoRecurseAA =
1853 A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1854 if (!NoRecurseAA.isAssumedNoRecurse())
1855 return false;
1856 
1857 // Recursion to the same function
1858 if (CB.getCalledFunction() == getAnchorScope())
1859 return false;
1860 
1861 return true;
1862 };
1863 
1864 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1865 return indicatePessimisticFixpoint();
1866 return ChangeStatus::UNCHANGED;
1867 }
1868 
1869 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1870 };
1871 
1872 /// NoRecurse attribute deduction for a call site.
1873 struct AANoRecurseCallSite final : AANoRecurseImpl {
1874 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1875 : AANoRecurseImpl(IRP, A) {}
1876 
1877 /// See AbstractAttribute::initialize(...).
1878 void initialize(Attributor &A) override {
1879 AANoRecurseImpl::initialize(A);
1880 Function *F = getAssociatedFunction();
1881 if (!F)
1882 indicatePessimisticFixpoint();
1883 }
1884 
1885 /// See AbstractAttribute::updateImpl(...).
1886 ChangeStatus updateImpl(Attributor &A) override {
1887 // TODO: Once we have call site specific value information we can provide
1888 // call site specific liveness information and then it makes
1889 // sense to specialize attributes for call sites arguments instead of
1890 // redirecting requests to the callee argument.
1891 Function *F = getAssociatedFunction();
1892 const IRPosition &FnPos = IRPosition::function(*F);
1893 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1894 return clampStateAndIndicateChange(
1895 getState(),
1896 static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1897 }
1898 
1899 /// See AbstractAttribute::trackStatistics()
1900 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1901 };
1902 
1903 /// -------------------- Undefined-Behavior Attributes ------------------------
1904 
1905 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1906 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1907 : AAUndefinedBehavior(IRP, A) {}
1908 
1909 /// See AbstractAttribute::updateImpl(...).
1910 /// The update inspects memory accesses through a pointer and conditional branches.
1911 ChangeStatus updateImpl(Attributor &A) override { 1912 const size_t UBPrevSize = KnownUBInsts.size(); 1913 const size_t NoUBPrevSize = AssumedNoUBInsts.size(); 1914 1915 auto InspectMemAccessInstForUB = [&](Instruction &I) { 1916 // Skip instructions that are already saved. 1917 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1918 return true; 1919 1920 // If we reach here, we know we have an instruction 1921 // that accesses memory through a pointer operand, 1922 // for which getPointerOperand() should give it to us. 1923 const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); 1924 assert(PtrOp && 1925 "Expected pointer operand of memory accessing instruction"); 1926 1927 // Either we stopped and the appropriate action was taken, 1928 // or we got back a simplified value to continue. 1929 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); 1930 if (!SimplifiedPtrOp.hasValue()) 1931 return true; 1932 const Value *PtrOpVal = SimplifiedPtrOp.getValue(); 1933 1934 // A memory access through a pointer is considered UB 1935 // only if the pointer has constant null value. 1936 // TODO: Expand it to not only check constant values. 1937 if (!isa<ConstantPointerNull>(PtrOpVal)) { 1938 AssumedNoUBInsts.insert(&I); 1939 return true; 1940 } 1941 const Type *PtrTy = PtrOpVal->getType(); 1942 1943 // Because we only consider instructions inside functions, 1944 // assume that a parent function exists. 1945 const Function *F = I.getFunction(); 1946 1947 // A memory access using constant null pointer is only considered UB 1948 // if null pointer is _not_ defined for the target platform. 1949 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) 1950 AssumedNoUBInsts.insert(&I); 1951 else 1952 KnownUBInsts.insert(&I); 1953 return true; 1954 }; 1955 1956 auto InspectBrInstForUB = [&](Instruction &I) { 1957 // A conditional branch instruction is considered UB if it has `undef` 1958 // condition. 1959 1960 // Skip instructions that are already saved. 1961 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1962 return true; 1963 1964 // We know we have a branch instruction. 1965 auto BrInst = cast<BranchInst>(&I); 1966 1967 // Unconditional branches are never considered UB. 1968 if (BrInst->isUnconditional()) 1969 return true; 1970 1971 // Either we stopped and the appropriate action was taken, 1972 // or we got back a simplified value to continue. 1973 Optional<Value *> SimplifiedCond = 1974 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); 1975 if (!SimplifiedCond.hasValue()) 1976 return true; 1977 AssumedNoUBInsts.insert(&I); 1978 return true; 1979 }; 1980 1981 A.checkForAllInstructions(InspectMemAccessInstForUB, *this, 1982 {Instruction::Load, Instruction::Store, 1983 Instruction::AtomicCmpXchg, 1984 Instruction::AtomicRMW}, 1985 /* CheckBBLivenessOnly */ true); 1986 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br}, 1987 /* CheckBBLivenessOnly */ true); 1988 if (NoUBPrevSize != AssumedNoUBInsts.size() || 1989 UBPrevSize != KnownUBInsts.size()) 1990 return ChangeStatus::CHANGED; 1991 return ChangeStatus::UNCHANGED; 1992 } 1993 1994 bool isKnownToCauseUB(Instruction *I) const override { 1995 return KnownUBInsts.count(I); 1996 } 1997 1998 bool isAssumedToCauseUB(Instruction *I) const override { 1999 // In simple words, if an instruction is not in the assumed to _not_ 2000 // cause UB, then it is assumed UB (that includes those 2001 // in the KnownUBInsts set). 
The rest is boilerplate
2002 // to ensure that it is one of the instructions we test
2003 // for UB.
2004 
2005 switch (I->getOpcode()) {
2006 case Instruction::Load:
2007 case Instruction::Store:
2008 case Instruction::AtomicCmpXchg:
2009 case Instruction::AtomicRMW:
2010 return !AssumedNoUBInsts.count(I);
2011 case Instruction::Br: {
2012 auto BrInst = cast<BranchInst>(I);
2013 if (BrInst->isUnconditional())
2014 return false;
2015 return !AssumedNoUBInsts.count(I);
2016 } break;
2017 default:
2018 return false;
2019 }
2020 return false;
2021 }
2022 
2023 ChangeStatus manifest(Attributor &A) override {
2024 if (KnownUBInsts.empty())
2025 return ChangeStatus::UNCHANGED;
2026 for (Instruction *I : KnownUBInsts)
2027 A.changeToUnreachableAfterManifest(I);
2028 return ChangeStatus::CHANGED;
2029 }
2030 
2031 /// See AbstractAttribute::getAsStr()
2032 const std::string getAsStr() const override {
2033 return getAssumed() ? "undefined-behavior" : "no-ub";
2034 }
2035 
2036 /// Note: The correctness of this analysis depends on the fact that the
2037 /// following 2 sets will stop changing after some point.
2038 /// "Change" here means that their size changes.
2039 /// The size of each set is monotonically increasing
2040 /// (we only add items to them) and it is upper bounded by the number of
2041 /// instructions in the processed function (we can never save more
2042 /// elements in either set than this number). Hence, at some point,
2043 /// they will stop increasing.
2044 /// Consequently, at some point, both sets will have stopped
2045 /// changing, effectively making the analysis reach a fixpoint.
2046 
2047 /// Note: These 2 sets are disjoint and an instruction can be considered
2048 /// one of 3 things:
2049 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2050 /// the KnownUBInsts set.
2051 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2052 /// has a reason to assume it).
2053 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2054 /// could not find a reason to assume or prove that it can cause UB,
2055 /// hence it assumes it doesn't. We have a set for these instructions
2056 /// so that we don't reprocess them in every update.
2057 /// Note however that instructions in this set may cause UB.
2058 
2059 protected:
2060 /// A set of all live instructions _known_ to cause UB.
2061 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2062 
2063 private:
2064 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2065 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2066 
2067 // Should be called on updates in which, if we're processing an instruction
2068 // \p I that depends on a value \p V, one of the following has to happen:
2069 // - If the value is assumed, then stop.
2070 // - If the value is known but undef, then consider it UB.
2071 // - Otherwise, do specific processing with the simplified value.
2072 // We return None in the first 2 cases to signify that an appropriate
2073 // action was taken and the caller should stop.
2074 // Otherwise, we return the simplified value that the caller should
2075 // use for specific processing.
2076 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2077 Instruction *I) {
2078 const auto &ValueSimplifyAA =
2079 A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2080 Optional<Value *> SimplifiedV =
2081 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2082 if (!ValueSimplifyAA.isKnown()) {
2083 // Don't depend on assumed values.
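// Committing an instruction to one of the (grow-only) UB sets based on a
// still changing simplification would be unsound; leaving it unclassified
// here means it is revisited once the simplified value becomes known.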
2084 return llvm::None; 2085 } 2086 if (!SimplifiedV.hasValue()) { 2087 // If it is known (which we tested above) but it doesn't have a value, 2088 // then we can assume `undef` and hence the instruction is UB. 2089 KnownUBInsts.insert(I); 2090 return llvm::None; 2091 } 2092 Value *Val = SimplifiedV.getValue(); 2093 if (isa<UndefValue>(Val)) { 2094 KnownUBInsts.insert(I); 2095 return llvm::None; 2096 } 2097 return Val; 2098 } 2099 }; 2100 2101 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { 2102 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) 2103 : AAUndefinedBehaviorImpl(IRP, A) {} 2104 2105 /// See AbstractAttribute::trackStatistics() 2106 void trackStatistics() const override { 2107 STATS_DECL(UndefinedBehaviorInstruction, Instruction, 2108 "Number of instructions known to have UB"); 2109 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += 2110 KnownUBInsts.size(); 2111 } 2112 }; 2113 2114 /// ------------------------ Will-Return Attributes ---------------------------- 2115 2116 // Helper function that checks whether a function has any cycle which we don't 2117 // know if it is bounded or not. 2118 // Loops with maximum trip count are considered bounded, any other cycle not. 2119 static bool mayContainUnboundedCycle(Function &F, Attributor &A) { 2120 ScalarEvolution *SE = 2121 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); 2122 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); 2123 // If either SCEV or LoopInfo is not available for the function then we assume 2124 // any cycle to be unbounded cycle. 2125 // We use scc_iterator which uses Tarjan algorithm to find all the maximal 2126 // SCCs.To detect if there's a cycle, we only need to find the maximal ones. 2127 if (!SE || !LI) { 2128 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) 2129 if (SCCI.hasCycle()) 2130 return true; 2131 return false; 2132 } 2133 2134 // If there's irreducible control, the function may contain non-loop cycles. 2135 if (mayContainIrreducibleControl(F, LI)) 2136 return true; 2137 2138 // Any loop that does not have a max trip count is considered unbounded cycle. 2139 for (auto *L : LI->getLoopsInPreorder()) { 2140 if (!SE->getSmallConstantMaxTripCount(L)) 2141 return true; 2142 } 2143 return false; 2144 } 2145 2146 struct AAWillReturnImpl : public AAWillReturn { 2147 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2148 : AAWillReturn(IRP, A) {} 2149 2150 /// See AbstractAttribute::initialize(...). 2151 void initialize(Attributor &A) override { 2152 AAWillReturn::initialize(A); 2153 2154 Function *F = getAnchorScope(); 2155 if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A)) 2156 indicatePessimisticFixpoint(); 2157 } 2158 2159 /// See AbstractAttribute::updateImpl(...). 
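/// A short sketch of the check below: a call is fine for willreturn if the
/// callee is already known to return, or if it is assumed to return and also
/// assumed norecurse. The norecurse requirement avoids the circular argument
/// where (mutually) recursive functions justify each other's willreturn
/// purely from assumed information, e.g. (hypothetical) f(){g();} g(){f();}.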
2160 ChangeStatus updateImpl(Attributor &A) override { 2161 auto CheckForWillReturn = [&](Instruction &I) { 2162 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2163 const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos); 2164 if (WillReturnAA.isKnownWillReturn()) 2165 return true; 2166 if (!WillReturnAA.isAssumedWillReturn()) 2167 return false; 2168 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos); 2169 return NoRecurseAA.isAssumedNoRecurse(); 2170 }; 2171 2172 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) 2173 return indicatePessimisticFixpoint(); 2174 2175 return ChangeStatus::UNCHANGED; 2176 } 2177 2178 /// See AbstractAttribute::getAsStr() 2179 const std::string getAsStr() const override { 2180 return getAssumed() ? "willreturn" : "may-noreturn"; 2181 } 2182 }; 2183 2184 struct AAWillReturnFunction final : AAWillReturnImpl { 2185 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2186 : AAWillReturnImpl(IRP, A) {} 2187 2188 /// See AbstractAttribute::trackStatistics() 2189 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2190 }; 2191 2192 /// WillReturn attribute deduction for a call sites. 2193 struct AAWillReturnCallSite final : AAWillReturnImpl { 2194 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2195 : AAWillReturnImpl(IRP, A) {} 2196 2197 /// See AbstractAttribute::initialize(...). 2198 void initialize(Attributor &A) override { 2199 AAWillReturnImpl::initialize(A); 2200 Function *F = getAssociatedFunction(); 2201 if (!F) 2202 indicatePessimisticFixpoint(); 2203 } 2204 2205 /// See AbstractAttribute::updateImpl(...). 2206 ChangeStatus updateImpl(Attributor &A) override { 2207 // TODO: Once we have call site specific value information we can provide 2208 // call site specific liveness information and then it makes 2209 // sense to specialize attributes for call sites arguments instead of 2210 // redirecting requests to the callee argument. 2211 Function *F = getAssociatedFunction(); 2212 const IRPosition &FnPos = IRPosition::function(*F); 2213 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); 2214 return clampStateAndIndicateChange( 2215 getState(), 2216 static_cast<const AAWillReturn::StateType &>(FnAA.getState())); 2217 } 2218 2219 /// See AbstractAttribute::trackStatistics() 2220 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2221 }; 2222 2223 /// -------------------AAReachability Attribute-------------------------- 2224 2225 struct AAReachabilityImpl : AAReachability { 2226 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2227 : AAReachability(IRP, A) {} 2228 2229 const std::string getAsStr() const override { 2230 // TODO: Return the number of reachable queries. 2231 return "reachable"; 2232 } 2233 2234 /// See AbstractAttribute::initialize(...). 2235 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } 2236 2237 /// See AbstractAttribute::updateImpl(...). 
2238 ChangeStatus updateImpl(Attributor &A) override { 2239 return indicatePessimisticFixpoint(); 2240 } 2241 }; 2242 2243 struct AAReachabilityFunction final : public AAReachabilityImpl { 2244 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2245 : AAReachabilityImpl(IRP, A) {} 2246 2247 /// See AbstractAttribute::trackStatistics() 2248 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2249 }; 2250 2251 /// ------------------------ NoAlias Argument Attribute ------------------------ 2252 2253 struct AANoAliasImpl : AANoAlias { 2254 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2255 assert(getAssociatedType()->isPointerTy() && 2256 "Noalias is a pointer attribute"); 2257 } 2258 2259 const std::string getAsStr() const override { 2260 return getAssumed() ? "noalias" : "may-alias"; 2261 } 2262 }; 2263 2264 /// NoAlias attribute for a floating value. 2265 struct AANoAliasFloating final : AANoAliasImpl { 2266 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 2267 : AANoAliasImpl(IRP, A) {} 2268 2269 /// See AbstractAttribute::initialize(...). 2270 void initialize(Attributor &A) override { 2271 AANoAliasImpl::initialize(A); 2272 Value *Val = &getAssociatedValue(); 2273 do { 2274 CastInst *CI = dyn_cast<CastInst>(Val); 2275 if (!CI) 2276 break; 2277 Value *Base = CI->getOperand(0); 2278 if (!Base->hasOneUse()) 2279 break; 2280 Val = Base; 2281 } while (true); 2282 2283 if (!Val->getType()->isPointerTy()) { 2284 indicatePessimisticFixpoint(); 2285 return; 2286 } 2287 2288 if (isa<AllocaInst>(Val)) 2289 indicateOptimisticFixpoint(); 2290 else if (isa<ConstantPointerNull>(Val) && 2291 !NullPointerIsDefined(getAnchorScope(), 2292 Val->getType()->getPointerAddressSpace())) 2293 indicateOptimisticFixpoint(); 2294 else if (Val != &getAssociatedValue()) { 2295 const auto &ValNoAliasAA = 2296 A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); 2297 if (ValNoAliasAA.isKnownNoAlias()) 2298 indicateOptimisticFixpoint(); 2299 } 2300 } 2301 2302 /// See AbstractAttribute::updateImpl(...). 2303 ChangeStatus updateImpl(Attributor &A) override { 2304 // TODO: Implement this. 2305 return indicatePessimisticFixpoint(); 2306 } 2307 2308 /// See AbstractAttribute::trackStatistics() 2309 void trackStatistics() const override { 2310 STATS_DECLTRACK_FLOATING_ATTR(noalias) 2311 } 2312 }; 2313 2314 /// NoAlias attribute for an argument. 2315 struct AANoAliasArgument final 2316 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 2317 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 2318 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 2319 2320 /// See AbstractAttribute::initialize(...). 2321 void initialize(Attributor &A) override { 2322 Base::initialize(A); 2323 // See callsite argument attribute and callee argument attribute. 2324 if (hasAttr({Attribute::ByVal})) 2325 indicateOptimisticFixpoint(); 2326 } 2327 2328 /// See AbstractAttribute::update(...). 2329 ChangeStatus updateImpl(Attributor &A) override { 2330 // We have to make sure no-alias on the argument does not break 2331 // synchronization when this is a callback argument, see also [1] below. 2332 // If synchronization cannot be affected, we delegate to the base updateImpl 2333 // function, otherwise we give up for now. 2334 2335 // If the function is no-sync, no-alias cannot break synchronization. 
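// (Roughly, the worry is an argument forwarded through a callback: a
// concurrently running callee may legitimately access the same memory under
// synchronization, and declaring the pointer noalias could license
// reorderings across those synchronization points, see [1] below.)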
2336 const auto &NoSyncAA = A.getAAFor<AANoSync>( 2337 *this, IRPosition::function_scope(getIRPosition())); 2338 if (NoSyncAA.isAssumedNoSync()) 2339 return Base::updateImpl(A); 2340 2341 // If the argument is read-only, no-alias cannot break synchronization. 2342 const auto &MemBehaviorAA = 2343 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 2344 if (MemBehaviorAA.isAssumedReadOnly()) 2345 return Base::updateImpl(A); 2346 2347 // If the argument is never passed through callbacks, no-alias cannot break 2348 // synchronization. 2349 bool AllCallSitesKnown; 2350 if (A.checkForAllCallSites( 2351 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 2352 true, AllCallSitesKnown)) 2353 return Base::updateImpl(A); 2354 2355 // TODO: add no-alias but make sure it doesn't break synchronization by 2356 // introducing fake uses. See: 2357 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, 2358 // International Workshop on OpenMP 2018, 2359 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 2360 2361 return indicatePessimisticFixpoint(); 2362 } 2363 2364 /// See AbstractAttribute::trackStatistics() 2365 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 2366 }; 2367 2368 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 2369 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 2370 : AANoAliasImpl(IRP, A) {} 2371 2372 /// See AbstractAttribute::initialize(...). 2373 void initialize(Attributor &A) override { 2374 // See callsite argument attribute and callee argument attribute. 2375 const auto &CB = cast<CallBase>(getAnchorValue()); 2376 if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias)) 2377 indicateOptimisticFixpoint(); 2378 Value &Val = getAssociatedValue(); 2379 if (isa<ConstantPointerNull>(Val) && 2380 !NullPointerIsDefined(getAnchorScope(), 2381 Val.getType()->getPointerAddressSpace())) 2382 indicateOptimisticFixpoint(); 2383 } 2384 2385 /// Determine if the underlying value may alias with the call site argument 2386 /// \p OtherArgNo of \p ICS (= the underlying call site). 2387 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 2388 const AAMemoryBehavior &MemBehaviorAA, 2389 const CallBase &CB, unsigned OtherArgNo) { 2390 // We do not need to worry about aliasing with the underlying IRP. 2391 if (this->getArgNo() == (int)OtherArgNo) 2392 return false; 2393 2394 // If it is not a pointer or pointer vector we do not alias. 2395 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 2396 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 2397 return false; 2398 2399 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 2400 *this, IRPosition::callsite_argument(CB, OtherArgNo), 2401 /* TrackDependence */ false); 2402 2403 // If the argument is readnone, there is no read-write aliasing. 2404 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 2405 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2406 return false; 2407 } 2408 2409 // If the argument is readonly and the underlying value is readonly, there 2410 // is no read-write aliasing. 2411 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 2412 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 2413 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2414 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2415 return false; 2416 } 2417 2418 // We have to utilize actual alias analysis queries so we need the object. 
2419 if (!AAR) 2420 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 2421 2422 // Try to rule it out at the call site. 2423 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 2424 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 2425 "callsite arguments: " 2426 << getAssociatedValue() << " " << *ArgOp << " => " 2427 << (IsAliasing ? "" : "no-") << "alias \n"); 2428 2429 return IsAliasing; 2430 } 2431 2432 bool 2433 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 2434 const AAMemoryBehavior &MemBehaviorAA, 2435 const AANoAlias &NoAliasAA) { 2436 // We can deduce "noalias" if the following conditions hold. 2437 // (i) Associated value is assumed to be noalias in the definition. 2438 // (ii) Associated value is assumed to be no-capture in all the uses 2439 // possibly executed before this callsite. 2440 // (iii) There is no other pointer argument which could alias with the 2441 // value. 2442 2443 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 2444 if (!AssociatedValueIsNoAliasAtDef) { 2445 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 2446 << " is not no-alias at the definition\n"); 2447 return false; 2448 } 2449 2450 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 2451 2452 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2453 auto &NoCaptureAA = 2454 A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false); 2455 // Check whether the value is captured in the scope using AANoCapture. 2456 // Look at CFG and check only uses possibly executed before this 2457 // callsite. 2458 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 2459 Instruction *UserI = cast<Instruction>(U.getUser()); 2460 2461 // If user if curr instr and only use. 2462 if (UserI == getCtxI() && UserI->hasOneUse()) 2463 return true; 2464 2465 const Function *ScopeFn = VIRP.getAnchorScope(); 2466 if (ScopeFn) { 2467 const auto &ReachabilityAA = 2468 A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); 2469 2470 if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI())) 2471 return true; 2472 2473 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2474 if (CB->isArgOperand(&U)) { 2475 2476 unsigned ArgNo = CB->getArgOperandNo(&U); 2477 2478 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 2479 *this, IRPosition::callsite_argument(*CB, ArgNo)); 2480 2481 if (NoCaptureAA.isAssumedNoCapture()) 2482 return true; 2483 } 2484 } 2485 } 2486 2487 // For cases which can potentially have more users 2488 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 2489 isa<SelectInst>(U)) { 2490 Follow = true; 2491 return true; 2492 } 2493 2494 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 2495 return false; 2496 }; 2497 2498 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 2499 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 2500 LLVM_DEBUG( 2501 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 2502 << " cannot be noalias as it is potentially captured\n"); 2503 return false; 2504 } 2505 } 2506 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 2507 2508 // Check there is no other pointer argument which could alias with the 2509 // value passed at this call site. 
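// Illustrative sketch (hypothetical call): for "use(p, q)" where q may
// point into the same object as p and either pointer may be written
// through, condition (iii) fails and p is not marked noalias; the
// readnone/readonly exceptions are handled in mayAliasWithArgument above.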
2510 // TODO: AbstractCallSite 2511 const auto &CB = cast<CallBase>(getAnchorValue()); 2512 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); 2513 OtherArgNo++) 2514 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 2515 return false; 2516 2517 return true; 2518 } 2519 2520 /// See AbstractAttribute::updateImpl(...). 2521 ChangeStatus updateImpl(Attributor &A) override { 2522 // If the argument is readnone we are done as there are no accesses via the 2523 // argument. 2524 auto &MemBehaviorAA = 2525 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 2526 /* TrackDependence */ false); 2527 if (MemBehaviorAA.isAssumedReadNone()) { 2528 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2529 return ChangeStatus::UNCHANGED; 2530 } 2531 2532 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2533 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, 2534 /* TrackDependence */ false); 2535 2536 AAResults *AAR = nullptr; 2537 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 2538 NoAliasAA)) { 2539 LLVM_DEBUG( 2540 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 2541 return ChangeStatus::UNCHANGED; 2542 } 2543 2544 return indicatePessimisticFixpoint(); 2545 } 2546 2547 /// See AbstractAttribute::trackStatistics() 2548 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 2549 }; 2550 2551 /// NoAlias attribute for function return value. 2552 struct AANoAliasReturned final : AANoAliasImpl { 2553 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 2554 : AANoAliasImpl(IRP, A) {} 2555 2556 /// See AbstractAttribute::updateImpl(...). 2557 virtual ChangeStatus updateImpl(Attributor &A) override { 2558 2559 auto CheckReturnValue = [&](Value &RV) -> bool { 2560 if (Constant *C = dyn_cast<Constant>(&RV)) 2561 if (C->isNullValue() || isa<UndefValue>(C)) 2562 return true; 2563 2564 /// For now, we can only deduce noalias if we have call sites. 2565 /// FIXME: add more support. 2566 if (!isa<CallBase>(&RV)) 2567 return false; 2568 2569 const IRPosition &RVPos = IRPosition::value(RV); 2570 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); 2571 if (!NoAliasAA.isAssumedNoAlias()) 2572 return false; 2573 2574 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); 2575 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 2576 }; 2577 2578 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 2579 return indicatePessimisticFixpoint(); 2580 2581 return ChangeStatus::UNCHANGED; 2582 } 2583 2584 /// See AbstractAttribute::trackStatistics() 2585 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 2586 }; 2587 2588 /// NoAlias attribute deduction for a call site return value. 2589 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 2590 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 2591 : AANoAliasImpl(IRP, A) {} 2592 2593 /// See AbstractAttribute::initialize(...). 2594 void initialize(Attributor &A) override { 2595 AANoAliasImpl::initialize(A); 2596 Function *F = getAssociatedFunction(); 2597 if (!F) 2598 indicatePessimisticFixpoint(); 2599 } 2600 2601 /// See AbstractAttribute::updateImpl(...). 
2602 ChangeStatus updateImpl(Attributor &A) override { 2603 // TODO: Once we have call site specific value information we can provide 2604 // call site specific liveness information and then it makes 2605 // sense to specialize attributes for call sites arguments instead of 2606 // redirecting requests to the callee argument. 2607 Function *F = getAssociatedFunction(); 2608 const IRPosition &FnPos = IRPosition::returned(*F); 2609 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); 2610 return clampStateAndIndicateChange( 2611 getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState())); 2612 } 2613 2614 /// See AbstractAttribute::trackStatistics() 2615 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 2616 }; 2617 2618 /// -------------------AAIsDead Function Attribute----------------------- 2619 2620 struct AAIsDeadValueImpl : public AAIsDead { 2621 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2622 2623 /// See AAIsDead::isAssumedDead(). 2624 bool isAssumedDead() const override { return getAssumed(); } 2625 2626 /// See AAIsDead::isKnownDead(). 2627 bool isKnownDead() const override { return getKnown(); } 2628 2629 /// See AAIsDead::isAssumedDead(BasicBlock *). 2630 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 2631 2632 /// See AAIsDead::isKnownDead(BasicBlock *). 2633 bool isKnownDead(const BasicBlock *BB) const override { return false; } 2634 2635 /// See AAIsDead::isAssumedDead(Instruction *I). 2636 bool isAssumedDead(const Instruction *I) const override { 2637 return I == getCtxI() && isAssumedDead(); 2638 } 2639 2640 /// See AAIsDead::isKnownDead(Instruction *I). 2641 bool isKnownDead(const Instruction *I) const override { 2642 return isAssumedDead(I) && getKnown(); 2643 } 2644 2645 /// See AbstractAttribute::getAsStr(). 2646 const std::string getAsStr() const override { 2647 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 2648 } 2649 2650 /// Check if all uses are assumed dead. 2651 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 2652 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 2653 // Explicitly set the dependence class to required because we want a long 2654 // chain of N dependent instructions to be considered live as soon as one is 2655 // without going through N update cycles. This is not required for 2656 // correctness. 2657 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); 2658 } 2659 2660 /// Determine if \p I is assumed to be side-effect free. 
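/// Rough summary of the logic below: this holds if \p I would be trivially
/// dead, or if it is a non-intrinsic call that is assumed nounwind and
/// assumed to at most read memory.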
2661 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 2662 if (!I || wouldInstructionBeTriviallyDead(I)) 2663 return true; 2664 2665 auto *CB = dyn_cast<CallBase>(I); 2666 if (!CB || isa<IntrinsicInst>(CB)) 2667 return false; 2668 2669 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 2670 const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( 2671 *this, CallIRP, /* TrackDependence */ false); 2672 if (!NoUnwindAA.isAssumedNoUnwind()) 2673 return false; 2674 if (!NoUnwindAA.isKnownNoUnwind()) 2675 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 2676 2677 const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( 2678 *this, CallIRP, /* TrackDependence */ false); 2679 if (MemBehaviorAA.isAssumedReadOnly()) { 2680 if (!MemBehaviorAA.isKnownReadOnly()) 2681 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2682 return true; 2683 } 2684 return false; 2685 } 2686 }; 2687 2688 struct AAIsDeadFloating : public AAIsDeadValueImpl { 2689 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 2690 : AAIsDeadValueImpl(IRP, A) {} 2691 2692 /// See AbstractAttribute::initialize(...). 2693 void initialize(Attributor &A) override { 2694 if (isa<UndefValue>(getAssociatedValue())) { 2695 indicatePessimisticFixpoint(); 2696 return; 2697 } 2698 2699 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2700 if (!isAssumedSideEffectFree(A, I)) 2701 indicatePessimisticFixpoint(); 2702 } 2703 2704 /// See AbstractAttribute::updateImpl(...). 2705 ChangeStatus updateImpl(Attributor &A) override { 2706 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2707 if (!isAssumedSideEffectFree(A, I)) 2708 return indicatePessimisticFixpoint(); 2709 2710 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2711 return indicatePessimisticFixpoint(); 2712 return ChangeStatus::UNCHANGED; 2713 } 2714 2715 /// See AbstractAttribute::manifest(...). 2716 ChangeStatus manifest(Attributor &A) override { 2717 Value &V = getAssociatedValue(); 2718 if (auto *I = dyn_cast<Instruction>(&V)) { 2719 // If we get here we basically know the users are all dead. We check if 2720 // isAssumedSideEffectFree returns true here again because it might not be 2721 // the case and only the users are dead but the instruction (=call) is 2722 // still needed. 2723 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { 2724 A.deleteAfterManifest(*I); 2725 return ChangeStatus::CHANGED; 2726 } 2727 } 2728 if (V.use_empty()) 2729 return ChangeStatus::UNCHANGED; 2730 2731 bool UsedAssumedInformation = false; 2732 Optional<Constant *> C = 2733 A.getAssumedConstant(V, *this, UsedAssumedInformation); 2734 if (C.hasValue() && C.getValue()) 2735 return ChangeStatus::UNCHANGED; 2736 2737 // Replace the value with undef as it is dead but keep droppable uses around 2738 // as they provide information we don't want to give up on just yet. 2739 UndefValue &UV = *UndefValue::get(V.getType()); 2740 bool AnyChange = 2741 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); 2742 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2743 } 2744 2745 /// See AbstractAttribute::trackStatistics() 2746 void trackStatistics() const override { 2747 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 2748 } 2749 }; 2750 2751 struct AAIsDeadArgument : public AAIsDeadFloating { 2752 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 2753 : AAIsDeadFloating(IRP, A) {} 2754 2755 /// See AbstractAttribute::initialize(...). 
2756 void initialize(Attributor &A) override { 2757 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 2758 indicatePessimisticFixpoint(); 2759 } 2760 2761 /// See AbstractAttribute::manifest(...). 2762 ChangeStatus manifest(Attributor &A) override { 2763 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 2764 Argument &Arg = *getAssociatedArgument(); 2765 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 2766 if (A.registerFunctionSignatureRewrite( 2767 Arg, /* ReplacementTypes */ {}, 2768 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 2769 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 2770 Arg.dropDroppableUses(); 2771 return ChangeStatus::CHANGED; 2772 } 2773 return Changed; 2774 } 2775 2776 /// See AbstractAttribute::trackStatistics() 2777 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 2778 }; 2779 2780 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 2781 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 2782 : AAIsDeadValueImpl(IRP, A) {} 2783 2784 /// See AbstractAttribute::initialize(...). 2785 void initialize(Attributor &A) override { 2786 if (isa<UndefValue>(getAssociatedValue())) 2787 indicatePessimisticFixpoint(); 2788 } 2789 2790 /// See AbstractAttribute::updateImpl(...). 2791 ChangeStatus updateImpl(Attributor &A) override { 2792 // TODO: Once we have call site specific value information we can provide 2793 // call site specific liveness information and then it makes 2794 // sense to specialize attributes for call sites arguments instead of 2795 // redirecting requests to the callee argument. 2796 Argument *Arg = getAssociatedArgument(); 2797 if (!Arg) 2798 return indicatePessimisticFixpoint(); 2799 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2800 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); 2801 return clampStateAndIndicateChange( 2802 getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState())); 2803 } 2804 2805 /// See AbstractAttribute::manifest(...). 2806 ChangeStatus manifest(Attributor &A) override { 2807 CallBase &CB = cast<CallBase>(getAnchorValue()); 2808 Use &U = CB.getArgOperandUse(getArgNo()); 2809 assert(!isa<UndefValue>(U.get()) && 2810 "Expected undef values to be filtered out!"); 2811 UndefValue &UV = *UndefValue::get(U->getType()); 2812 if (A.changeUseAfterManifest(U, UV)) 2813 return ChangeStatus::CHANGED; 2814 return ChangeStatus::UNCHANGED; 2815 } 2816 2817 /// See AbstractAttribute::trackStatistics() 2818 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 2819 }; 2820 2821 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 2822 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 2823 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 2824 2825 /// See AAIsDead::isAssumedDead(). 2826 bool isAssumedDead() const override { 2827 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 2828 } 2829 2830 /// See AbstractAttribute::initialize(...). 2831 void initialize(Attributor &A) override { 2832 if (isa<UndefValue>(getAssociatedValue())) { 2833 indicatePessimisticFixpoint(); 2834 return; 2835 } 2836 2837 // We track this separately as a secondary state. 2838 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 2839 } 2840 2841 /// See AbstractAttribute::updateImpl(...). 
2842 ChangeStatus updateImpl(Attributor &A) override { 2843 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2844 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 2845 IsAssumedSideEffectFree = false; 2846 Changed = ChangeStatus::CHANGED; 2847 } 2848 2849 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2850 return indicatePessimisticFixpoint(); 2851 return Changed; 2852 } 2853 2854 /// See AbstractAttribute::trackStatistics() 2855 void trackStatistics() const override { 2856 if (IsAssumedSideEffectFree) 2857 STATS_DECLTRACK_CSRET_ATTR(IsDead) 2858 else 2859 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 2860 } 2861 2862 /// See AbstractAttribute::getAsStr(). 2863 const std::string getAsStr() const override { 2864 return isAssumedDead() 2865 ? "assumed-dead" 2866 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 2867 } 2868 2869 private: 2870 bool IsAssumedSideEffectFree; 2871 }; 2872 2873 struct AAIsDeadReturned : public AAIsDeadValueImpl { 2874 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 2875 : AAIsDeadValueImpl(IRP, A) {} 2876 2877 /// See AbstractAttribute::updateImpl(...). 2878 ChangeStatus updateImpl(Attributor &A) override { 2879 2880 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 2881 {Instruction::Ret}); 2882 2883 auto PredForCallSite = [&](AbstractCallSite ACS) { 2884 if (ACS.isCallbackCall() || !ACS.getInstruction()) 2885 return false; 2886 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 2887 }; 2888 2889 bool AllCallSitesKnown; 2890 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 2891 AllCallSitesKnown)) 2892 return indicatePessimisticFixpoint(); 2893 2894 return ChangeStatus::UNCHANGED; 2895 } 2896 2897 /// See AbstractAttribute::manifest(...). 2898 ChangeStatus manifest(Attributor &A) override { 2899 // TODO: Rewrite the signature to return void? 2900 bool AnyChange = false; 2901 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 2902 auto RetInstPred = [&](Instruction &I) { 2903 ReturnInst &RI = cast<ReturnInst>(I); 2904 if (!isa<UndefValue>(RI.getReturnValue())) 2905 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 2906 return true; 2907 }; 2908 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); 2909 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2910 } 2911 2912 /// See AbstractAttribute::trackStatistics() 2913 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 2914 }; 2915 2916 struct AAIsDeadFunction : public AAIsDead { 2917 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2918 2919 /// See AbstractAttribute::initialize(...). 2920 void initialize(Attributor &A) override { 2921 const Function *F = getAnchorScope(); 2922 if (F && !F->isDeclaration()) { 2923 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 2924 assumeLive(A, F->getEntryBlock()); 2925 } 2926 } 2927 2928 /// See AbstractAttribute::getAsStr(). 2929 const std::string getAsStr() const override { 2930 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 2931 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 2932 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 2933 std::to_string(KnownDeadEnds.size()) + "]"; 2934 } 2935 2936 /// See AbstractAttribute::manifest(...). 
2937 ChangeStatus manifest(Attributor &A) override { 2938 assert(getState().isValidState() && 2939 "Attempted to manifest an invalid state!"); 2940 2941 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 2942 Function &F = *getAnchorScope(); 2943 2944 if (AssumedLiveBlocks.empty()) { 2945 A.deleteAfterManifest(F); 2946 return ChangeStatus::CHANGED; 2947 } 2948 2949 // Flag to determine if we can change an invoke to a call assuming the 2950 // callee is nounwind. This is not possible if the personality of the 2951 // function allows to catch asynchronous exceptions. 2952 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 2953 2954 KnownDeadEnds.set_union(ToBeExploredFrom); 2955 for (const Instruction *DeadEndI : KnownDeadEnds) { 2956 auto *CB = dyn_cast<CallBase>(DeadEndI); 2957 if (!CB) 2958 continue; 2959 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 2960 *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, 2961 DepClassTy::OPTIONAL); 2962 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 2963 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 2964 continue; 2965 2966 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 2967 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 2968 else 2969 A.changeToUnreachableAfterManifest( 2970 const_cast<Instruction *>(DeadEndI->getNextNode())); 2971 HasChanged = ChangeStatus::CHANGED; 2972 } 2973 2974 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 2975 for (BasicBlock &BB : F) 2976 if (!AssumedLiveBlocks.count(&BB)) { 2977 A.deleteAfterManifest(BB); 2978 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 2979 } 2980 2981 return HasChanged; 2982 } 2983 2984 /// See AbstractAttribute::updateImpl(...). 2985 ChangeStatus updateImpl(Attributor &A) override; 2986 2987 /// See AbstractAttribute::trackStatistics() 2988 void trackStatistics() const override {} 2989 2990 /// Returns true if the function is assumed dead. 2991 bool isAssumedDead() const override { return false; } 2992 2993 /// See AAIsDead::isKnownDead(). 2994 bool isKnownDead() const override { return false; } 2995 2996 /// See AAIsDead::isAssumedDead(BasicBlock *). 2997 bool isAssumedDead(const BasicBlock *BB) const override { 2998 assert(BB->getParent() == getAnchorScope() && 2999 "BB must be in the same anchor scope function."); 3000 3001 if (!getAssumed()) 3002 return false; 3003 return !AssumedLiveBlocks.count(BB); 3004 } 3005 3006 /// See AAIsDead::isKnownDead(BasicBlock *). 3007 bool isKnownDead(const BasicBlock *BB) const override { 3008 return getKnown() && isAssumedDead(BB); 3009 } 3010 3011 /// See AAIsDead::isAssumed(Instruction *I). 3012 bool isAssumedDead(const Instruction *I) const override { 3013 assert(I->getParent()->getParent() == getAnchorScope() && 3014 "Instruction must be in the same anchor scope function."); 3015 3016 if (!getAssumed()) 3017 return false; 3018 3019 // If it is not in AssumedLiveBlocks then it for sure dead. 3020 // Otherwise, it can still be after noreturn call in a live block. 3021 if (!AssumedLiveBlocks.count(I->getParent())) 3022 return true; 3023 3024 // If it is not after a liveness barrier it is live. 3025 const Instruction *PrevI = I->getPrevNode(); 3026 while (PrevI) { 3027 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3028 return true; 3029 PrevI = PrevI->getPrevNode(); 3030 } 3031 return false; 3032 } 3033 3034 /// See AAIsDead::isKnownDead(Instruction *I). 
3035 bool isKnownDead(const Instruction *I) const override { 3036 return getKnown() && isAssumedDead(I); 3037 } 3038 3039 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3040 /// that internal function called from \p BB should now be looked at. 3041 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3042 if (!AssumedLiveBlocks.insert(&BB).second) 3043 return false; 3044 3045 // We assume that all of BB is (probably) live now and if there are calls to 3046 // internal functions we will assume that those are now live as well. This 3047 // is a performance optimization for blocks with calls to a lot of internal 3048 // functions. It can however cause dead functions to be treated as live. 3049 for (const Instruction &I : BB) 3050 if (const auto *CB = dyn_cast<CallBase>(&I)) 3051 if (const Function *F = CB->getCalledFunction()) 3052 if (F->hasLocalLinkage()) 3053 A.markLiveInternalFunction(*F); 3054 return true; 3055 } 3056 3057 /// Collection of instructions that need to be explored again, e.g., we 3058 /// did assume they do not transfer control to (one of their) successors. 3059 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3060 3061 /// Collection of instructions that are known to not transfer control. 3062 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3063 3064 /// Collection of all assumed live BasicBlocks. 3065 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3066 }; 3067 3068 static bool 3069 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3070 AbstractAttribute &AA, 3071 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3072 const IRPosition &IPos = IRPosition::callsite_function(CB); 3073 3074 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3075 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3076 if (NoReturnAA.isAssumedNoReturn()) 3077 return !NoReturnAA.isKnownNoReturn(); 3078 if (CB.isTerminator()) 3079 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3080 else 3081 AliveSuccessors.push_back(CB.getNextNode()); 3082 return false; 3083 } 3084 3085 static bool 3086 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3087 AbstractAttribute &AA, 3088 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3089 bool UsedAssumedInformation = 3090 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3091 3092 // First, determine if we can change an invoke to a call assuming the 3093 // callee is nounwind. This is not possible if the personality of the 3094 // function allows to catch asynchronous exceptions. 
3095 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3096 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3097 } else { 3098 const IRPosition &IPos = IRPosition::callsite_function(II); 3099 const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( 3100 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3101 if (AANoUnw.isAssumedNoUnwind()) { 3102 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3103 } else { 3104 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3105 } 3106 } 3107 return UsedAssumedInformation; 3108 } 3109 3110 static bool 3111 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3112 AbstractAttribute &AA, 3113 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3114 bool UsedAssumedInformation = false; 3115 if (BI.getNumSuccessors() == 1) { 3116 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3117 } else { 3118 Optional<ConstantInt *> CI = getAssumedConstantInt( 3119 A, *BI.getCondition(), AA, UsedAssumedInformation); 3120 if (!CI.hasValue()) { 3121 // No value yet, assume both edges are dead. 3122 } else if (CI.getValue()) { 3123 const BasicBlock *SuccBB = 3124 BI.getSuccessor(1 - CI.getValue()->getZExtValue()); 3125 AliveSuccessors.push_back(&SuccBB->front()); 3126 } else { 3127 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3128 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3129 UsedAssumedInformation = false; 3130 } 3131 } 3132 return UsedAssumedInformation; 3133 } 3134 3135 static bool 3136 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3137 AbstractAttribute &AA, 3138 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3139 bool UsedAssumedInformation = false; 3140 Optional<ConstantInt *> CI = 3141 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); 3142 if (!CI.hasValue()) { 3143 // No value yet, assume all edges are dead. 3144 } else if (CI.getValue()) { 3145 for (auto &CaseIt : SI.cases()) { 3146 if (CaseIt.getCaseValue() == CI.getValue()) { 3147 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3148 return UsedAssumedInformation; 3149 } 3150 } 3151 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3152 return UsedAssumedInformation; 3153 } else { 3154 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3155 AliveSuccessors.push_back(&SuccBB->front()); 3156 } 3157 return UsedAssumedInformation; 3158 } 3159 3160 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3161 ChangeStatus Change = ChangeStatus::UNCHANGED; 3162 3163 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3164 << getAnchorScope()->size() << "] BBs and " 3165 << ToBeExploredFrom.size() << " exploration points and " 3166 << KnownDeadEnds.size() << " known dead ends\n"); 3167 3168 // Copy and clear the list of instructions we need to explore from. It is 3169 // refilled with instructions the next update has to look at. 3170 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3171 ToBeExploredFrom.end()); 3172 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3173 3174 SmallVector<const Instruction *, 8> AliveSuccessors; 3175 while (!Worklist.empty()) { 3176 const Instruction *I = Worklist.pop_back_val(); 3177 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3178 3179 AliveSuccessors.clear(); 3180 3181 bool UsedAssumedInformation = false; 3182 switch (I->getOpcode()) { 3183 // TODO: look for (assumed) UB to backwards propagate "deadness". 
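// Illustrative note: if the condition of a conditional branch is assumed (but
// not yet known) to be a constant, only the corresponding successor is
// reported alive and UsedAssumedInformation keeps the branch in the
// exploration set so it is revisited if the assumption is invalidated.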
3184 default:
3185 if (I->isTerminator()) {
3186 for (const BasicBlock *SuccBB : successors(I->getParent()))
3187 AliveSuccessors.push_back(&SuccBB->front());
3188 } else {
3189 AliveSuccessors.push_back(I->getNextNode());
3190 }
3191 break;
3192 case Instruction::Call:
3193 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3194 *this, AliveSuccessors);
3195 break;
3196 case Instruction::Invoke:
3197 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3198 *this, AliveSuccessors);
3199 break;
3200 case Instruction::Br:
3201 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3202 *this, AliveSuccessors);
3203 break;
3204 case Instruction::Switch:
3205 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3206 *this, AliveSuccessors);
3207 break;
3208 }
3209
3210 if (UsedAssumedInformation) {
3211 NewToBeExploredFrom.insert(I);
3212 } else {
3213 Change = ChangeStatus::CHANGED;
3214 if (AliveSuccessors.empty() ||
3215 (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3216 KnownDeadEnds.insert(I);
3217 }
3218
3219 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3220 << AliveSuccessors.size() << " UsedAssumedInformation: "
3221 << UsedAssumedInformation << "\n");
3222
3223 for (const Instruction *AliveSuccessor : AliveSuccessors) {
3224 if (!I->isTerminator()) {
3225 assert(AliveSuccessors.size() == 1 &&
3226 "Non-terminator expected to have a single successor!");
3227 Worklist.push_back(AliveSuccessor);
3228 } else {
3229 if (assumeLive(A, *AliveSuccessor->getParent()))
3230 Worklist.push_back(AliveSuccessor);
3231 }
3232 }
3233 }
3234
3235 ToBeExploredFrom = std::move(NewToBeExploredFrom);
3236
3237 // If we know everything is live there is no need to query for liveness.
3238 // Instead, indicating a pessimistic fixpoint will cause the state to be
3239 // "invalid" and all queries to be answered conservatively without lookups.
3240 // To be in this state we have to (1) finish the exploration, (2) not rule
3241 // any unreachable code dead, and (3) not discover any non-trivial dead
3242 // end.
3243 if (ToBeExploredFrom.empty() &&
3244 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3245 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3246 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3247 }))
3248 return indicatePessimisticFixpoint();
3249 return Change;
3250 }
3251
3252 /// Liveness information for a call site.
3253 struct AAIsDeadCallSite final : AAIsDeadFunction {
3254 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3255 : AAIsDeadFunction(IRP, A) {}
3256
3257 /// See AbstractAttribute::initialize(...).
3258 void initialize(Attributor &A) override {
3259 // TODO: Once we have call site specific value information we can provide
3260 // call site specific liveness information and then it makes
3261 // sense to specialize attributes for call sites instead of
3262 // redirecting requests to the callee.
3263 llvm_unreachable("Abstract attributes for liveness are not "
3264 "supported for call sites yet!");
3265 }
3266
3267 /// See AbstractAttribute::updateImpl(...).
3268 ChangeStatus updateImpl(Attributor &A) override { 3269 return indicatePessimisticFixpoint(); 3270 } 3271 3272 /// See AbstractAttribute::trackStatistics() 3273 void trackStatistics() const override {} 3274 }; 3275 3276 /// -------------------- Dereferenceable Argument Attribute -------------------- 3277 3278 template <> 3279 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 3280 const DerefState &R) { 3281 ChangeStatus CS0 = 3282 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 3283 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 3284 return CS0 | CS1; 3285 } 3286 3287 struct AADereferenceableImpl : AADereferenceable { 3288 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 3289 : AADereferenceable(IRP, A) {} 3290 using StateType = DerefState; 3291 3292 /// See AbstractAttribute::initialize(...). 3293 void initialize(Attributor &A) override { 3294 SmallVector<Attribute, 4> Attrs; 3295 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 3296 Attrs, /* IgnoreSubsumingPositions */ false, &A); 3297 for (const Attribute &Attr : Attrs) 3298 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 3299 3300 const IRPosition &IRP = this->getIRPosition(); 3301 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, 3302 /* TrackDependence */ false); 3303 3304 bool CanBeNull; 3305 takeKnownDerefBytesMaximum( 3306 IRP.getAssociatedValue().getPointerDereferenceableBytes( 3307 A.getDataLayout(), CanBeNull)); 3308 3309 bool IsFnInterface = IRP.isFnInterfaceKind(); 3310 Function *FnScope = IRP.getAnchorScope(); 3311 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 3312 indicatePessimisticFixpoint(); 3313 return; 3314 } 3315 3316 if (Instruction *CtxI = getCtxI()) 3317 followUsesInMBEC(*this, A, getState(), *CtxI); 3318 } 3319 3320 /// See AbstractAttribute::getState() 3321 /// { 3322 StateType &getState() override { return *this; } 3323 const StateType &getState() const override { return *this; } 3324 /// } 3325 3326 /// Helper function for collecting accessed bytes in must-be-executed-context 3327 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 3328 DerefState &State) { 3329 const Value *UseV = U->get(); 3330 if (!UseV->getType()->isPointerTy()) 3331 return; 3332 3333 Type *PtrTy = UseV->getType(); 3334 const DataLayout &DL = A.getDataLayout(); 3335 int64_t Offset; 3336 if (const Value *Base = getBasePointerOfAccessPointerOperand( 3337 I, Offset, DL, /*AllowNonInbounds*/ true)) { 3338 if (Base == &getAssociatedValue() && 3339 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 3340 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 3341 State.addAccessedBytes(Offset, Size); 3342 } 3343 } 3344 return; 3345 } 3346 3347 /// See followUsesInMBEC 3348 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3349 AADereferenceable::StateType &State) { 3350 bool IsNonNull = false; 3351 bool TrackUse = false; 3352 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 3353 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 3354 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 3355 << " for instruction " << *I << "\n"); 3356 3357 addAccessedBytesForUse(A, U, I, State); 3358 State.takeKnownDerefBytesMaximum(DerefBytes); 3359 return TrackUse; 3360 } 3361 3362 /// See AbstractAttribute::manifest(...). 
3363 ChangeStatus manifest(Attributor &A) override { 3364 ChangeStatus Change = AADereferenceable::manifest(A); 3365 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 3366 removeAttrs({Attribute::DereferenceableOrNull}); 3367 return ChangeStatus::CHANGED; 3368 } 3369 return Change; 3370 } 3371 3372 void getDeducedAttributes(LLVMContext &Ctx, 3373 SmallVectorImpl<Attribute> &Attrs) const override { 3374 // TODO: Add *_globally support 3375 if (isAssumedNonNull()) 3376 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 3377 Ctx, getAssumedDereferenceableBytes())); 3378 else 3379 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 3380 Ctx, getAssumedDereferenceableBytes())); 3381 } 3382 3383 /// See AbstractAttribute::getAsStr(). 3384 const std::string getAsStr() const override { 3385 if (!getAssumedDereferenceableBytes()) 3386 return "unknown-dereferenceable"; 3387 return std::string("dereferenceable") + 3388 (isAssumedNonNull() ? "" : "_or_null") + 3389 (isAssumedGlobal() ? "_globally" : "") + "<" + 3390 std::to_string(getKnownDereferenceableBytes()) + "-" + 3391 std::to_string(getAssumedDereferenceableBytes()) + ">"; 3392 } 3393 }; 3394 3395 /// Dereferenceable attribute for a floating value. 3396 struct AADereferenceableFloating : AADereferenceableImpl { 3397 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 3398 : AADereferenceableImpl(IRP, A) {} 3399 3400 /// See AbstractAttribute::updateImpl(...). 3401 ChangeStatus updateImpl(Attributor &A) override { 3402 const DataLayout &DL = A.getDataLayout(); 3403 3404 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 3405 bool Stripped) -> bool { 3406 unsigned IdxWidth = 3407 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 3408 APInt Offset(IdxWidth, 0); 3409 const Value *Base = 3410 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false); 3411 3412 const auto &AA = 3413 A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base)); 3414 int64_t DerefBytes = 0; 3415 if (!Stripped && this == &AA) { 3416 // Use IR information if we did not strip anything. 3417 // TODO: track globally. 3418 bool CanBeNull; 3419 DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); 3420 T.GlobalState.indicatePessimisticFixpoint(); 3421 } else { 3422 const DerefState &DS = static_cast<const DerefState &>(AA.getState()); 3423 DerefBytes = DS.DerefBytesState.getAssumed(); 3424 T.GlobalState &= DS.GlobalState; 3425 } 3426 3427 3428 // For now we do not try to "increase" dereferenceability due to negative 3429 // indices as we first have to come up with code to deal with loops and 3430 // for overflows of the dereferenceable bytes. 3431 int64_t OffsetSExt = Offset.getSExtValue(); 3432 if (OffsetSExt < 0) 3433 OffsetSExt = 0; 3434 3435 T.takeAssumedDerefBytesMinimum( 3436 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3437 3438 if (this == &AA) { 3439 if (!Stripped) { 3440 // If nothing was stripped IR information is all we got. 3441 T.takeKnownDerefBytesMaximum( 3442 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3443 T.indicatePessimisticFixpoint(); 3444 } else if (OffsetSExt > 0) { 3445 // If something was stripped but there is circular reasoning we look 3446 // for the offset. If it is positive we basically decrease the 3447 // dereferenceable bytes in a circluar loop now, which will simply 3448 // drive them down to the known value in a very slow way which we 3449 // can accelerate. 
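// Worked example (hypothetical numbers): with an assumed dereferenceability
// of 16 bytes and a positive offset of 4 from circular reasoning, the assumed
// bytes would otherwise shrink by 4 per iteration (16, 12, 8, ...); the
// pessimistic fixpoint below jumps directly to the known value instead.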
3450 T.indicatePessimisticFixpoint(); 3451 } 3452 } 3453 3454 return T.isValidState(); 3455 }; 3456 3457 DerefState T; 3458 if (!genericValueTraversal<AADereferenceable, DerefState>( 3459 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 3460 return indicatePessimisticFixpoint(); 3461 3462 return clampStateAndIndicateChange(getState(), T); 3463 } 3464 3465 /// See AbstractAttribute::trackStatistics() 3466 void trackStatistics() const override { 3467 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 3468 } 3469 }; 3470 3471 /// Dereferenceable attribute for a return value. 3472 struct AADereferenceableReturned final 3473 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 3474 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 3475 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 3476 IRP, A) {} 3477 3478 /// See AbstractAttribute::trackStatistics() 3479 void trackStatistics() const override { 3480 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 3481 } 3482 }; 3483 3484 /// Dereferenceable attribute for an argument 3485 struct AADereferenceableArgument final 3486 : AAArgumentFromCallSiteArguments<AADereferenceable, 3487 AADereferenceableImpl> { 3488 using Base = 3489 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 3490 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 3491 : Base(IRP, A) {} 3492 3493 /// See AbstractAttribute::trackStatistics() 3494 void trackStatistics() const override { 3495 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 3496 } 3497 }; 3498 3499 /// Dereferenceable attribute for a call site argument. 3500 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 3501 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 3502 : AADereferenceableFloating(IRP, A) {} 3503 3504 /// See AbstractAttribute::trackStatistics() 3505 void trackStatistics() const override { 3506 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 3507 } 3508 }; 3509 3510 /// Dereferenceable attribute deduction for a call site return value. 3511 struct AADereferenceableCallSiteReturned final 3512 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 3513 using Base = 3514 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 3515 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 3516 : Base(IRP, A) {} 3517 3518 /// See AbstractAttribute::trackStatistics() 3519 void trackStatistics() const override { 3520 STATS_DECLTRACK_CS_ATTR(dereferenceable); 3521 } 3522 }; 3523 3524 // ------------------------ Align Argument Attribute ------------------------ 3525 3526 /// \p Ptr is accessed so we can get alignment information if the ABI requires 3527 /// the element type to be aligned. 3528 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr, 3529 const DataLayout &DL) { 3530 MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL); 3531 Type *ElementTy = Ptr->getType()->getPointerElementType(); 3532 if (ElementTy->isSized()) 3533 KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy)); 3534 return KnownAlignment; 3535 } 3536 3537 static unsigned getKnownAlignForUse(Attributor &A, 3538 AbstractAttribute &QueryingAA, 3539 Value &AssociatedValue, const Use *U, 3540 const Instruction *I, bool &TrackUse) { 3541 // We need to follow common pointer manipulation uses to the accesses they 3542 // feed into. 3543 if (isa<CastInst>(I)) { 3544 // Follow all but ptr2int casts. 
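// E.g., bitcasts and addrspacecasts still yield a pointer whose eventual
// accesses tell us something about the original value's alignment, so they
// are followed; once the value is turned into an integer via ptrtoint, the
// access-based reasoning below no longer applies and the use is not followed.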
3545 TrackUse = !isa<PtrToIntInst>(I); 3546 return 0; 3547 } 3548 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3549 if (GEP->hasAllConstantIndices()) { 3550 TrackUse = true; 3551 return 0; 3552 } 3553 } 3554 3555 MaybeAlign MA; 3556 if (const auto *CB = dyn_cast<CallBase>(I)) { 3557 if (CB->isBundleOperand(U) || CB->isCallee(U)) 3558 return 0; 3559 3560 unsigned ArgNo = CB->getArgOperandNo(U); 3561 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 3562 // As long as we only use known information there is no need to track 3563 // dependences here. 3564 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, 3565 /* TrackDependence */ false); 3566 MA = MaybeAlign(AlignAA.getKnownAlign()); 3567 } 3568 3569 const DataLayout &DL = A.getDataLayout(); 3570 const Value *UseV = U->get(); 3571 if (auto *SI = dyn_cast<StoreInst>(I)) { 3572 if (SI->getPointerOperand() == UseV) { 3573 if (unsigned SIAlign = SI->getAlignment()) 3574 MA = MaybeAlign(SIAlign); 3575 else 3576 MA = getKnownAlignmentFromAccessedPtr(UseV, DL); 3577 } 3578 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 3579 if (LI->getPointerOperand() == UseV) { 3580 if (unsigned LIAlign = LI->getAlignment()) 3581 MA = MaybeAlign(LIAlign); 3582 else 3583 MA = getKnownAlignmentFromAccessedPtr(UseV, DL); 3584 } 3585 } 3586 3587 if (!MA.hasValue() || MA <= 1) 3588 return 0; 3589 3590 unsigned Alignment = MA->value(); 3591 int64_t Offset; 3592 3593 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 3594 if (Base == &AssociatedValue) { 3595 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 3596 // So we can say that the maximum power of two which is a divisor of 3597 // gcd(Offset, Alignment) is an alignment. 3598 3599 uint32_t gcd = 3600 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 3601 Alignment = llvm::PowerOf2Floor(gcd); 3602 } 3603 } 3604 3605 return Alignment; 3606 } 3607 3608 struct AAAlignImpl : AAAlign { 3609 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 3610 3611 /// See AbstractAttribute::initialize(...). 3612 void initialize(Attributor &A) override { 3613 SmallVector<Attribute, 4> Attrs; 3614 getAttrs({Attribute::Alignment}, Attrs); 3615 for (const Attribute &Attr : Attrs) 3616 takeKnownMaximum(Attr.getValueAsInt()); 3617 3618 Value &V = getAssociatedValue(); 3619 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 3620 // use of the function pointer. This was caused by D73131. We want to 3621 // avoid this for function pointers especially because we iterate 3622 // their uses and int2ptr is not handled. It is not a correctness 3623 // problem though! 3624 if (!V.getType()->getPointerElementType()->isFunctionTy()) 3625 takeKnownMaximum( 3626 V.getPointerAlignment(A.getDataLayout()).valueOrOne().value()); 3627 3628 if (getIRPosition().isFnInterfaceKind() && 3629 (!getAnchorScope() || 3630 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 3631 indicatePessimisticFixpoint(); 3632 return; 3633 } 3634 3635 if (Instruction *CtxI = getCtxI()) 3636 followUsesInMBEC(*this, A, getState(), *CtxI); 3637 } 3638 3639 /// See AbstractAttribute::manifest(...). 3640 ChangeStatus manifest(Attributor &A) override { 3641 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 3642 3643 // Check for users that allow alignment annotations. 
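// Illustrative IR (hypothetical): with an assumed alignment of 16 for %p,
//   %v = load i32, i32* %p, align 4
// is rewritten by the loop below to
//   %v = load i32, i32* %p, align 16
// and stores through %p are annotated the same way.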
3644 Value &AssociatedValue = getAssociatedValue(); 3645 for (const Use &U : AssociatedValue.uses()) { 3646 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 3647 if (SI->getPointerOperand() == &AssociatedValue) 3648 if (SI->getAlignment() < getAssumedAlign()) { 3649 STATS_DECLTRACK(AAAlign, Store, 3650 "Number of times alignment added to a store"); 3651 SI->setAlignment(Align(getAssumedAlign())); 3652 LoadStoreChanged = ChangeStatus::CHANGED; 3653 } 3654 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 3655 if (LI->getPointerOperand() == &AssociatedValue) 3656 if (LI->getAlignment() < getAssumedAlign()) { 3657 LI->setAlignment(Align(getAssumedAlign())); 3658 STATS_DECLTRACK(AAAlign, Load, 3659 "Number of times alignment added to a load"); 3660 LoadStoreChanged = ChangeStatus::CHANGED; 3661 } 3662 } 3663 } 3664 3665 ChangeStatus Changed = AAAlign::manifest(A); 3666 3667 MaybeAlign InheritAlign = 3668 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3669 if (InheritAlign.valueOrOne() >= getAssumedAlign()) 3670 return LoadStoreChanged; 3671 return Changed | LoadStoreChanged; 3672 } 3673 3674 // TODO: Provide a helper to determine the implied ABI alignment and check in 3675 // the existing manifest method and a new one for AAAlignImpl that value 3676 // to avoid making the alignment explicit if it did not improve. 3677 3678 /// See AbstractAttribute::getDeducedAttributes 3679 virtual void 3680 getDeducedAttributes(LLVMContext &Ctx, 3681 SmallVectorImpl<Attribute> &Attrs) const override { 3682 if (getAssumedAlign() > 1) 3683 Attrs.emplace_back( 3684 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 3685 } 3686 3687 /// See followUsesInMBEC 3688 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3689 AAAlign::StateType &State) { 3690 bool TrackUse = false; 3691 3692 unsigned int KnownAlign = 3693 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 3694 State.takeKnownMaximum(KnownAlign); 3695 3696 return TrackUse; 3697 } 3698 3699 /// See AbstractAttribute::getAsStr(). 3700 const std::string getAsStr() const override { 3701 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 3702 "-" + std::to_string(getAssumedAlign()) + ">") 3703 : "unknown-align"; 3704 } 3705 }; 3706 3707 /// Align attribute for a floating value. 3708 struct AAAlignFloating : AAAlignImpl { 3709 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 3710 3711 /// See AbstractAttribute::updateImpl(...). 3712 ChangeStatus updateImpl(Attributor &A) override { 3713 const DataLayout &DL = A.getDataLayout(); 3714 3715 auto VisitValueCB = [&](Value &V, const Instruction *, 3716 AAAlign::StateType &T, bool Stripped) -> bool { 3717 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); 3718 if (!Stripped && this == &AA) { 3719 // Use only IR information if we did not strip anything. 3720 const MaybeAlign PA = V.getPointerAlignment(DL); 3721 T.takeKnownMaximum(PA ? PA->value() : 0); 3722 T.indicatePessimisticFixpoint(); 3723 } else { 3724 // Use abstract attribute information. 
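// E.g., if the AAAlign deduced for the stripped value only assumes align(8),
// merging it into T below keeps the combined state conservative: the result
// cannot claim a larger alignment than every value on the traversal supports.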
3725 const AAAlign::StateType &DS = 3726 static_cast<const AAAlign::StateType &>(AA.getState()); 3727 T ^= DS; 3728 } 3729 return T.isValidState(); 3730 }; 3731 3732 StateType T; 3733 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, 3734 VisitValueCB, getCtxI())) 3735 return indicatePessimisticFixpoint(); 3736 3737 // TODO: If we know we visited all incoming values, thus no are assumed 3738 // dead, we can take the known information from the state T. 3739 return clampStateAndIndicateChange(getState(), T); 3740 } 3741 3742 /// See AbstractAttribute::trackStatistics() 3743 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 3744 }; 3745 3746 /// Align attribute for function return value. 3747 struct AAAlignReturned final 3748 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 3749 AAAlignReturned(const IRPosition &IRP, Attributor &A) 3750 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {} 3751 3752 /// See AbstractAttribute::trackStatistics() 3753 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 3754 }; 3755 3756 /// Align attribute for function argument. 3757 struct AAAlignArgument final 3758 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 3759 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 3760 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3761 3762 /// See AbstractAttribute::manifest(...). 3763 ChangeStatus manifest(Attributor &A) override { 3764 // If the associated argument is involved in a must-tail call we give up 3765 // because we would need to keep the argument alignments of caller and 3766 // callee in-sync. Just does not seem worth the trouble right now. 3767 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 3768 return ChangeStatus::UNCHANGED; 3769 return Base::manifest(A); 3770 } 3771 3772 /// See AbstractAttribute::trackStatistics() 3773 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 3774 }; 3775 3776 struct AAAlignCallSiteArgument final : AAAlignFloating { 3777 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 3778 : AAAlignFloating(IRP, A) {} 3779 3780 /// See AbstractAttribute::manifest(...). 3781 ChangeStatus manifest(Attributor &A) override { 3782 // If the associated argument is involved in a must-tail call we give up 3783 // because we would need to keep the argument alignments of caller and 3784 // callee in-sync. Just does not seem worth the trouble right now. 3785 if (Argument *Arg = getAssociatedArgument()) 3786 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 3787 return ChangeStatus::UNCHANGED; 3788 ChangeStatus Changed = AAAlignImpl::manifest(A); 3789 MaybeAlign InheritAlign = 3790 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3791 if (InheritAlign.valueOrOne() >= getAssumedAlign()) 3792 Changed = ChangeStatus::UNCHANGED; 3793 return Changed; 3794 } 3795 3796 /// See AbstractAttribute::updateImpl(Attributor &A). 3797 ChangeStatus updateImpl(Attributor &A) override { 3798 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 3799 if (Argument *Arg = getAssociatedArgument()) { 3800 // We only take known information from the argument 3801 // so we do not need to track a dependence. 
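// Note (intent only, no behavioral change): known alignment can never be
// retracted, so copying the callee argument's known value is safe without a
// dependence; using its assumed value instead would require registering one
// so this attribute is re-run whenever the argument's state changes.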
3802 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 3803 *this, IRPosition::argument(*Arg), /* TrackDependence */ false); 3804 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 3805 } 3806 return Changed; 3807 } 3808 3809 /// See AbstractAttribute::trackStatistics() 3810 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 3811 }; 3812 3813 /// Align attribute deduction for a call site return value. 3814 struct AAAlignCallSiteReturned final 3815 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 3816 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 3817 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 3818 : Base(IRP, A) {} 3819 3820 /// See AbstractAttribute::initialize(...). 3821 void initialize(Attributor &A) override { 3822 Base::initialize(A); 3823 Function *F = getAssociatedFunction(); 3824 if (!F) 3825 indicatePessimisticFixpoint(); 3826 } 3827 3828 /// See AbstractAttribute::trackStatistics() 3829 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 3830 }; 3831 3832 /// ------------------ Function No-Return Attribute ---------------------------- 3833 struct AANoReturnImpl : public AANoReturn { 3834 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 3835 3836 /// See AbstractAttribute::initialize(...). 3837 void initialize(Attributor &A) override { 3838 AANoReturn::initialize(A); 3839 Function *F = getAssociatedFunction(); 3840 if (!F) 3841 indicatePessimisticFixpoint(); 3842 } 3843 3844 /// See AbstractAttribute::getAsStr(). 3845 const std::string getAsStr() const override { 3846 return getAssumed() ? "noreturn" : "may-return"; 3847 } 3848 3849 /// See AbstractAttribute::updateImpl(Attributor &A). 3850 virtual ChangeStatus updateImpl(Attributor &A) override { 3851 auto CheckForNoReturn = [](Instruction &) { return false; }; 3852 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 3853 {(unsigned)Instruction::Ret})) 3854 return indicatePessimisticFixpoint(); 3855 return ChangeStatus::UNCHANGED; 3856 } 3857 }; 3858 3859 struct AANoReturnFunction final : AANoReturnImpl { 3860 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 3861 : AANoReturnImpl(IRP, A) {} 3862 3863 /// See AbstractAttribute::trackStatistics() 3864 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 3865 }; 3866 3867 /// NoReturn attribute deduction for a call sites. 3868 struct AANoReturnCallSite final : AANoReturnImpl { 3869 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 3870 : AANoReturnImpl(IRP, A) {} 3871 3872 /// See AbstractAttribute::updateImpl(...). 3873 ChangeStatus updateImpl(Attributor &A) override { 3874 // TODO: Once we have call site specific value information we can provide 3875 // call site specific liveness information and then it makes 3876 // sense to specialize attributes for call sites arguments instead of 3877 // redirecting requests to the callee argument. 3878 Function *F = getAssociatedFunction(); 3879 const IRPosition &FnPos = IRPosition::function(*F); 3880 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos); 3881 return clampStateAndIndicateChange( 3882 getState(), 3883 static_cast<const AANoReturn::StateType &>(FnAA.getState())); 3884 } 3885 3886 /// See AbstractAttribute::trackStatistics() 3887 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } 3888 }; 3889 3890 /// ----------------------- Variable Capturing --------------------------------- 3891 3892 /// A class to hold the state of for no-capture attributes. 
3893 struct AANoCaptureImpl : public AANoCapture {
3894 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3895
3896 /// See AbstractAttribute::initialize(...).
3897 void initialize(Attributor &A) override {
3898 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3899 indicateOptimisticFixpoint();
3900 return;
3901 }
3902 Function *AnchorScope = getAnchorScope();
3903 if (isFnInterfaceKind() &&
3904 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3905 indicatePessimisticFixpoint();
3906 return;
3907 }
3908
3909 // You cannot "capture" null in the default address space.
3910 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3911 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3912 indicateOptimisticFixpoint();
3913 return;
3914 }
3915
3916 const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3917
3918 // Check what state the associated function can actually capture.
3919 if (F)
3920 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3921 else
3922 indicatePessimisticFixpoint();
3923 }
3924
3925 /// See AbstractAttribute::updateImpl(...).
3926 ChangeStatus updateImpl(Attributor &A) override;
3927
3928 /// See AbstractAttribute::getDeducedAttributes(...).
3929 virtual void
3930 getDeducedAttributes(LLVMContext &Ctx,
3931 SmallVectorImpl<Attribute> &Attrs) const override {
3932 if (!isAssumedNoCaptureMaybeReturned())
3933 return;
3934
3935 if (getArgNo() >= 0) {
3936 if (isAssumedNoCapture())
3937 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3938 else if (ManifestInternal)
3939 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3940 }
3941 }
3942
3943 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3944 /// depending on the ability of the function associated with \p IRP to capture
3945 /// state in memory and through "returning/throwing", respectively.
3946 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3947 const Function &F,
3948 BitIntegerState &State) {
3949 // TODO: Once we have memory behavior attributes we should use them here.
3950
3951 // If we know we cannot communicate or write to memory, we do not care about
3952 // ptr2int anymore.
3953 if (F.onlyReadsMemory() && F.doesNotThrow() &&
3954 F.getReturnType()->isVoidTy()) {
3955 State.addKnownBits(NO_CAPTURE);
3956 return;
3957 }
3958
3959 // A function cannot capture state in memory if it only reads memory; it
3960 // can, however, return/throw state, and that state might be influenced by
3961 // the pointer value, e.g., loading from a returned pointer might reveal a bit.
3962 if (F.onlyReadsMemory())
3963 State.addKnownBits(NOT_CAPTURED_IN_MEM);
3964
3965 // A function cannot communicate state back if it does not throw
3966 // exceptions and does not return values.
3967 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3968 State.addKnownBits(NOT_CAPTURED_IN_RET);
3969
3970 // Check existing "returned" attributes.
3971 int ArgNo = IRP.getArgNo();
3972 if (F.doesNotThrow() && ArgNo >= 0) {
3973 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3974 if (F.hasParamAttribute(u, Attribute::Returned)) {
3975 if (u == unsigned(ArgNo))
3976 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3977 else if (F.onlyReadsMemory())
3978 State.addKnownBits(NO_CAPTURE);
3979 else
3980 State.addKnownBits(NOT_CAPTURED_IN_RET);
3981 break;
3982 }
3983 }
3984 }
3985
3986 /// See AbstractState::getAsStr().
3987 const std::string getAsStr() const override { 3988 if (isKnownNoCapture()) 3989 return "known not-captured"; 3990 if (isAssumedNoCapture()) 3991 return "assumed not-captured"; 3992 if (isKnownNoCaptureMaybeReturned()) 3993 return "known not-captured-maybe-returned"; 3994 if (isAssumedNoCaptureMaybeReturned()) 3995 return "assumed not-captured-maybe-returned"; 3996 return "assumed-captured"; 3997 } 3998 }; 3999 4000 /// Attributor-aware capture tracker. 4001 struct AACaptureUseTracker final : public CaptureTracker { 4002 4003 /// Create a capture tracker that can lookup in-flight abstract attributes 4004 /// through the Attributor \p A. 4005 /// 4006 /// If a use leads to a potential capture, \p CapturedInMemory is set and the 4007 /// search is stopped. If a use leads to a return instruction, 4008 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed. 4009 /// If a use leads to a ptr2int which may capture the value, 4010 /// \p CapturedInInteger is set. If a use is found that is currently assumed 4011 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies 4012 /// set. All values in \p PotentialCopies are later tracked as well. For every 4013 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0, 4014 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger 4015 /// conservatively set to true. 4016 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA, 4017 const AAIsDead &IsDeadAA, AANoCapture::StateType &State, 4018 SmallVectorImpl<const Value *> &PotentialCopies, 4019 unsigned &RemainingUsesToExplore) 4020 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State), 4021 PotentialCopies(PotentialCopies), 4022 RemainingUsesToExplore(RemainingUsesToExplore) {} 4023 4024 /// Determine if \p V maybe captured. *Also updates the state!* 4025 bool valueMayBeCaptured(const Value *V) { 4026 if (V->getType()->isPointerTy()) { 4027 PointerMayBeCaptured(V, this); 4028 } else { 4029 State.indicatePessimisticFixpoint(); 4030 } 4031 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4032 } 4033 4034 /// See CaptureTracker::tooManyUses(). 4035 void tooManyUses() override { 4036 State.removeAssumedBits(AANoCapture::NO_CAPTURE); 4037 } 4038 4039 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override { 4040 if (CaptureTracker::isDereferenceableOrNull(O, DL)) 4041 return true; 4042 const auto &DerefAA = A.getAAFor<AADereferenceable>( 4043 NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true, 4044 DepClassTy::OPTIONAL); 4045 return DerefAA.getAssumedDereferenceableBytes(); 4046 } 4047 4048 /// See CaptureTracker::captured(...). 4049 bool captured(const Use *U) override { 4050 Instruction *UInst = cast<Instruction>(U->getUser()); 4051 LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst 4052 << "\n"); 4053 4054 // Because we may reuse the tracker multiple times we keep track of the 4055 // number of explored uses ourselves as well. 4056 if (RemainingUsesToExplore-- == 0) { 4057 LLVM_DEBUG(dbgs() << " - too many uses to explore!\n"); 4058 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4059 /* Return */ true); 4060 } 4061 4062 // Deal with ptr2int by following uses. 4063 if (isa<PtrToIntInst>(UInst)) { 4064 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n"); 4065 return valueMayBeCaptured(UInst); 4066 } 4067 4068 // Explicitly catch return instructions. 
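// E.g., for "ret i8* %p" the pointer escapes to the caller but not into
// memory or an integer, so only the NOT_CAPTURED_IN_RET bit is removed from
// the assumed state below.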
4069 if (isa<ReturnInst>(UInst)) 4070 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4071 /* Return */ true); 4072 4073 // For now we only use special logic for call sites. However, the tracker 4074 // itself knows about a lot of other non-capturing cases already. 4075 auto *CB = dyn_cast<CallBase>(UInst); 4076 if (!CB || !CB->isArgOperand(U)) 4077 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4078 /* Return */ true); 4079 4080 unsigned ArgNo = CB->getArgOperandNo(U); 4081 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); 4082 // If we have a abstract no-capture attribute for the argument we can use 4083 // it to justify a non-capture attribute here. This allows recursion! 4084 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); 4085 if (ArgNoCaptureAA.isAssumedNoCapture()) 4086 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4087 /* Return */ false); 4088 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4089 addPotentialCopy(*CB); 4090 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4091 /* Return */ false); 4092 } 4093 4094 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4095 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4096 /* Return */ true); 4097 } 4098 4099 /// Register \p CS as potential copy of the value we are checking. 4100 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } 4101 4102 /// See CaptureTracker::shouldExplore(...). 4103 bool shouldExplore(const Use *U) override { 4104 // Check liveness and ignore droppable users. 4105 return !U->getUser()->isDroppable() && 4106 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); 4107 } 4108 4109 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4110 /// \p CapturedInRet, then return the appropriate value for use in the 4111 /// CaptureTracker::captured() interface. 4112 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4113 bool CapturedInRet) { 4114 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4115 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4116 if (CapturedInMem) 4117 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4118 if (CapturedInInt) 4119 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4120 if (CapturedInRet) 4121 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4122 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4123 } 4124 4125 private: 4126 /// The attributor providing in-flight abstract attributes. 4127 Attributor &A; 4128 4129 /// The abstract attribute currently updated. 4130 AANoCapture &NoCaptureAA; 4131 4132 /// The abstract liveness state. 4133 const AAIsDead &IsDeadAA; 4134 4135 /// The state currently updated. 4136 AANoCapture::StateType &State; 4137 4138 /// Set of potential copies of the tracked value. 4139 SmallVectorImpl<const Value *> &PotentialCopies; 4140 4141 /// Global counter to limit the number of explored uses. 4142 unsigned &RemainingUsesToExplore; 4143 }; 4144 4145 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4146 const IRPosition &IRP = getIRPosition(); 4147 const Value *V = 4148 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue(); 4149 if (!V) 4150 return indicatePessimisticFixpoint(); 4151 4152 const Function *F = 4153 getArgNo() >= 0 ? 
IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4154 assert(F && "Expected a function!"); 4155 const IRPosition &FnPos = IRPosition::function(*F); 4156 const auto &IsDeadAA = 4157 A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); 4158 4159 AANoCapture::StateType T; 4160 4161 // Readonly means we cannot capture through memory. 4162 const auto &FnMemAA = 4163 A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); 4164 if (FnMemAA.isAssumedReadOnly()) { 4165 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4166 if (FnMemAA.isKnownReadOnly()) 4167 addKnownBits(NOT_CAPTURED_IN_MEM); 4168 else 4169 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4170 } 4171 4172 // Make sure all returned values are different than the underlying value. 4173 // TODO: we could do this in a more sophisticated way inside 4174 // AAReturnedValues, e.g., track all values that escape through returns 4175 // directly somehow. 4176 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4177 bool SeenConstant = false; 4178 for (auto &It : RVAA.returned_values()) { 4179 if (isa<Constant>(It.first)) { 4180 if (SeenConstant) 4181 return false; 4182 SeenConstant = true; 4183 } else if (!isa<Argument>(It.first) || 4184 It.first == getAssociatedArgument()) 4185 return false; 4186 } 4187 return true; 4188 }; 4189 4190 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 4191 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 4192 if (NoUnwindAA.isAssumedNoUnwind()) { 4193 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4194 const AAReturnedValues *RVAA = 4195 IsVoidTy ? nullptr 4196 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4197 /* TrackDependence */ true, 4198 DepClassTy::OPTIONAL); 4199 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4200 T.addKnownBits(NOT_CAPTURED_IN_RET); 4201 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4202 return ChangeStatus::UNCHANGED; 4203 if (NoUnwindAA.isKnownNoUnwind() && 4204 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4205 addKnownBits(NOT_CAPTURED_IN_RET); 4206 if (isKnown(NOT_CAPTURED_IN_MEM)) 4207 return indicateOptimisticFixpoint(); 4208 } 4209 } 4210 } 4211 4212 // Use the CaptureTracker interface and logic with the specialized tracker, 4213 // defined in AACaptureUseTracker, that can look at in-flight abstract 4214 // attributes and directly updates the assumed state. 4215 SmallVector<const Value *, 4> PotentialCopies; 4216 unsigned RemainingUsesToExplore = 4217 getDefaultMaxUsesToExploreForCaptureTracking(); 4218 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4219 RemainingUsesToExplore); 4220 4221 // Check all potential copies of the associated value until we can assume 4222 // none will be captured or we have to assume at least one might be. 4223 unsigned Idx = 0; 4224 PotentialCopies.push_back(V); 4225 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4226 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4227 4228 AANoCapture::StateType &S = getState(); 4229 auto Assumed = S.getAssumed(); 4230 S.intersectAssumedBits(T.getAssumed()); 4231 if (!isAssumedNoCaptureMaybeReturned()) 4232 return indicatePessimisticFixpoint(); 4233 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 4234 : ChangeStatus::CHANGED; 4235 } 4236 4237 /// NoCapture attribute for function arguments. 
4238 struct AANoCaptureArgument final : AANoCaptureImpl { 4239 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 4240 : AANoCaptureImpl(IRP, A) {} 4241 4242 /// See AbstractAttribute::trackStatistics() 4243 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 4244 }; 4245 4246 /// NoCapture attribute for call site arguments. 4247 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 4248 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 4249 : AANoCaptureImpl(IRP, A) {} 4250 4251 /// See AbstractAttribute::initialize(...). 4252 void initialize(Attributor &A) override { 4253 if (Argument *Arg = getAssociatedArgument()) 4254 if (Arg->hasByValAttr()) 4255 indicateOptimisticFixpoint(); 4256 AANoCaptureImpl::initialize(A); 4257 } 4258 4259 /// See AbstractAttribute::updateImpl(...). 4260 ChangeStatus updateImpl(Attributor &A) override { 4261 // TODO: Once we have call site specific value information we can provide 4262 // call site specific liveness information and then it makes 4263 // sense to specialize attributes for call sites arguments instead of 4264 // redirecting requests to the callee argument. 4265 Argument *Arg = getAssociatedArgument(); 4266 if (!Arg) 4267 return indicatePessimisticFixpoint(); 4268 const IRPosition &ArgPos = IRPosition::argument(*Arg); 4269 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); 4270 return clampStateAndIndicateChange( 4271 getState(), 4272 static_cast<const AANoCapture::StateType &>(ArgAA.getState())); 4273 } 4274 4275 /// See AbstractAttribute::trackStatistics() 4276 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 4277 }; 4278 4279 /// NoCapture attribute for floating values. 4280 struct AANoCaptureFloating final : AANoCaptureImpl { 4281 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 4282 : AANoCaptureImpl(IRP, A) {} 4283 4284 /// See AbstractAttribute::trackStatistics() 4285 void trackStatistics() const override { 4286 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 4287 } 4288 }; 4289 4290 /// NoCapture attribute for function return value. 4291 struct AANoCaptureReturned final : AANoCaptureImpl { 4292 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 4293 : AANoCaptureImpl(IRP, A) { 4294 llvm_unreachable("NoCapture is not applicable to function returns!"); 4295 } 4296 4297 /// See AbstractAttribute::initialize(...). 4298 void initialize(Attributor &A) override { 4299 llvm_unreachable("NoCapture is not applicable to function returns!"); 4300 } 4301 4302 /// See AbstractAttribute::updateImpl(...). 4303 ChangeStatus updateImpl(Attributor &A) override { 4304 llvm_unreachable("NoCapture is not applicable to function returns!"); 4305 } 4306 4307 /// See AbstractAttribute::trackStatistics() 4308 void trackStatistics() const override {} 4309 }; 4310 4311 /// NoCapture attribute deduction for a call site return value. 4312 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 4313 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 4314 : AANoCaptureImpl(IRP, A) {} 4315 4316 /// See AbstractAttribute::trackStatistics() 4317 void trackStatistics() const override { 4318 STATS_DECLTRACK_CSRET_ATTR(nocapture) 4319 } 4320 }; 4321 4322 /// ------------------ Value Simplify Attribute ---------------------------- 4323 struct AAValueSimplifyImpl : AAValueSimplify { 4324 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 4325 : AAValueSimplify(IRP, A) {} 4326 4327 /// See AbstractAttribute::initialize(...). 
4328 void initialize(Attributor &A) override { 4329 if (getAssociatedValue().getType()->isVoidTy()) 4330 indicatePessimisticFixpoint(); 4331 } 4332 4333 /// See AbstractAttribute::getAsStr(). 4334 const std::string getAsStr() const override { 4335 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") 4336 : "not-simple"; 4337 } 4338 4339 /// See AbstractAttribute::trackStatistics() 4340 void trackStatistics() const override {} 4341 4342 /// See AAValueSimplify::getAssumedSimplifiedValue() 4343 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 4344 if (!getAssumed()) 4345 return const_cast<Value *>(&getAssociatedValue()); 4346 return SimplifiedAssociatedValue; 4347 } 4348 4349 /// Helper function for querying AAValueSimplify and updating candicate. 4350 /// \param QueryingValue Value trying to unify with SimplifiedValue 4351 /// \param AccumulatedSimplifiedValue Current simplification result. 4352 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 4353 Value &QueryingValue, 4354 Optional<Value *> &AccumulatedSimplifiedValue) { 4355 // FIXME: Add a typecast support. 4356 4357 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( 4358 QueryingAA, IRPosition::value(QueryingValue)); 4359 4360 Optional<Value *> QueryingValueSimplified = 4361 ValueSimplifyAA.getAssumedSimplifiedValue(A); 4362 4363 if (!QueryingValueSimplified.hasValue()) 4364 return true; 4365 4366 if (!QueryingValueSimplified.getValue()) 4367 return false; 4368 4369 Value &QueryingValueSimplifiedUnwrapped = 4370 *QueryingValueSimplified.getValue(); 4371 4372 if (AccumulatedSimplifiedValue.hasValue() && 4373 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && 4374 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4375 return AccumulatedSimplifiedValue == QueryingValueSimplified; 4376 if (AccumulatedSimplifiedValue.hasValue() && 4377 isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4378 return true; 4379 4380 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue 4381 << " is assumed to be " 4382 << QueryingValueSimplifiedUnwrapped << "\n"); 4383 4384 AccumulatedSimplifiedValue = QueryingValueSimplified; 4385 return true; 4386 } 4387 4388 bool askSimplifiedValueForAAValueConstantRange(Attributor &A) { 4389 if (!getAssociatedValue().getType()->isIntegerTy()) 4390 return false; 4391 4392 const auto &ValueConstantRangeAA = 4393 A.getAAFor<AAValueConstantRange>(*this, getIRPosition()); 4394 4395 Optional<ConstantInt *> COpt = 4396 ValueConstantRangeAA.getAssumedConstantInt(A); 4397 if (COpt.hasValue()) { 4398 if (auto *C = COpt.getValue()) 4399 SimplifiedAssociatedValue = C; 4400 else 4401 return false; 4402 } else { 4403 SimplifiedAssociatedValue = llvm::None; 4404 } 4405 return true; 4406 } 4407 4408 /// See AbstractAttribute::manifest(...). 4409 ChangeStatus manifest(Attributor &A) override { 4410 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4411 4412 if (SimplifiedAssociatedValue.hasValue() && 4413 !SimplifiedAssociatedValue.getValue()) 4414 return Changed; 4415 4416 Value &V = getAssociatedValue(); 4417 auto *C = SimplifiedAssociatedValue.hasValue() 4418 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4419 : UndefValue::get(V.getType()); 4420 if (C) { 4421 // We can replace the AssociatedValue with the constant. 
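// Illustrative example (hypothetical IR): if every call site passes the same
// constant, e.g. "call void @f(i32 7)", the argument inside @f simplifies to
// i32 7 and its uses are rewritten below via changeValueAfterManifest.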
4422 if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4423 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4424 << " :: " << *this << "\n");
4425 if (A.changeValueAfterManifest(V, *C))
4426 Changed = ChangeStatus::CHANGED;
4427 }
4428 }
4429
4430 return Changed | AAValueSimplify::manifest(A);
4431 }
4432
4433 /// See AbstractState::indicatePessimisticFixpoint(...).
4434 ChangeStatus indicatePessimisticFixpoint() override {
4435 // NOTE: The associated value will be returned at a pessimistic fixpoint and
4436 // is regarded as known. That is why `indicateOptimisticFixpoint` is called.
4437 SimplifiedAssociatedValue = &getAssociatedValue();
4438 indicateOptimisticFixpoint();
4439 return ChangeStatus::CHANGED;
4440 }
4441
4442 protected:
4443 // An assumed simplified value. Initially, it is set to Optional::None, which
4444 // means that the value is not clear under the current assumptions. In the
4445 // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4446 // the original associated value.
4447 Optional<Value *> SimplifiedAssociatedValue;
4448 };
4449
4450 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4451 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4452 : AAValueSimplifyImpl(IRP, A) {}
4453
4454 void initialize(Attributor &A) override {
4455 AAValueSimplifyImpl::initialize(A);
4456 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4457 indicatePessimisticFixpoint();
4458 if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4459 /* IgnoreSubsumingPositions */ true))
4460 indicatePessimisticFixpoint();
4461
4462 // FIXME: This is a hack to prevent us from propagating function pointers in
4463 // the new pass manager CGSCC pass as it creates call edges the
4464 // CallGraphUpdater cannot handle yet.
4465 Value &V = getAssociatedValue();
4466 if (V.getType()->isPointerTy() &&
4467 V.getType()->getPointerElementType()->isFunctionTy() &&
4468 !A.isModulePass())
4469 indicatePessimisticFixpoint();
4470 }
4471
4472 /// See AbstractAttribute::updateImpl(...).
4473 ChangeStatus updateImpl(Attributor &A) override {
4474 // Byval is only replaceable if it is readonly; otherwise we would write
4475 // into the replaced value and not the copy that byval creates implicitly.
4476 Argument *Arg = getAssociatedArgument();
4477 if (Arg->hasByValAttr()) {
4478 // TODO: We probably need to verify synchronization is not an issue, e.g.,
4479 // there is no race by not copying a constant byval.
4480 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4481 if (!MemAA.isAssumedReadOnly())
4482 return indicatePessimisticFixpoint();
4483 }
4484
4485 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4486
4487 auto PredForCallSite = [&](AbstractCallSite ACS) {
4488 const IRPosition &ACSArgPos =
4489 IRPosition::callsite_argument(ACS, getArgNo());
4490 // Check if a corresponding argument was found or if it is not
4491 // associated (which can happen for callback calls).
4492 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4493 return false;
4494
4495 // We can only propagate thread independent values through callbacks.
4496 // This is different from direct/indirect call sites because for them we
4497 // know the thread executing the caller and callee is the same. For
4498 // callbacks this is not guaranteed, thus a thread dependent value could
4499 // be different for the caller and callee, making it invalid to propagate.
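// E.g., a constant expression referring to a thread_local global is thread
// dependent: the callback may execute on a different thread than the broker
// call site, so such a value must not be propagated and is rejected below.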
4500 Value &ArgOp = ACSArgPos.getAssociatedValue(); 4501 if (ACS.isCallbackCall()) 4502 if (auto *C = dyn_cast<Constant>(&ArgOp)) 4503 if (C->isThreadDependent()) 4504 return false; 4505 return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue); 4506 }; 4507 4508 bool AllCallSitesKnown; 4509 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 4510 AllCallSitesKnown)) 4511 if (!askSimplifiedValueForAAValueConstantRange(A)) 4512 return indicatePessimisticFixpoint(); 4513 4514 // If a candicate was found in this update, return CHANGED. 4515 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4516 ? ChangeStatus::UNCHANGED 4517 : ChangeStatus ::CHANGED; 4518 } 4519 4520 /// See AbstractAttribute::trackStatistics() 4521 void trackStatistics() const override { 4522 STATS_DECLTRACK_ARG_ATTR(value_simplify) 4523 } 4524 }; 4525 4526 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 4527 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 4528 : AAValueSimplifyImpl(IRP, A) {} 4529 4530 /// See AbstractAttribute::updateImpl(...). 4531 ChangeStatus updateImpl(Attributor &A) override { 4532 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4533 4534 auto PredForReturned = [&](Value &V) { 4535 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4536 }; 4537 4538 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 4539 if (!askSimplifiedValueForAAValueConstantRange(A)) 4540 return indicatePessimisticFixpoint(); 4541 4542 // If a candicate was found in this update, return CHANGED. 4543 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4544 ? ChangeStatus::UNCHANGED 4545 : ChangeStatus ::CHANGED; 4546 } 4547 4548 ChangeStatus manifest(Attributor &A) override { 4549 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4550 4551 if (SimplifiedAssociatedValue.hasValue() && 4552 !SimplifiedAssociatedValue.getValue()) 4553 return Changed; 4554 4555 Value &V = getAssociatedValue(); 4556 auto *C = SimplifiedAssociatedValue.hasValue() 4557 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4558 : UndefValue::get(V.getType()); 4559 if (C) { 4560 auto PredForReturned = 4561 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 4562 // We can replace the AssociatedValue with the constant. 4563 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) 4564 return true; 4565 4566 for (ReturnInst *RI : RetInsts) { 4567 if (RI->getFunction() != getAnchorScope()) 4568 continue; 4569 auto *RC = C; 4570 if (RC->getType() != RI->getReturnValue()->getType()) 4571 RC = ConstantExpr::getBitCast(RC, 4572 RI->getReturnValue()->getType()); 4573 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC 4574 << " in " << *RI << " :: " << *this << "\n"); 4575 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) 4576 Changed = ChangeStatus::CHANGED; 4577 } 4578 return true; 4579 }; 4580 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 4581 } 4582 4583 return Changed | AAValueSimplify::manifest(A); 4584 } 4585 4586 /// See AbstractAttribute::trackStatistics() 4587 void trackStatistics() const override { 4588 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 4589 } 4590 }; 4591 4592 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 4593 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 4594 : AAValueSimplifyImpl(IRP, A) {} 4595 4596 /// See AbstractAttribute::initialize(...). 4597 void initialize(Attributor &A) override { 4598 // FIXME: This might have exposed a SCC iterator update bug in the old PM. 
4599 // Needs investigation.
4600 // AAValueSimplifyImpl::initialize(A);
4601 Value &V = getAnchorValue();
4602 
4603 // TODO: add other cases.
4604 if (isa<Constant>(V))
4605 indicatePessimisticFixpoint();
4606 }
4607 
4608 /// See AbstractAttribute::updateImpl(...).
4609 ChangeStatus updateImpl(Attributor &A) override {
4610 bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4611 
4612 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4613 bool Stripped) -> bool {
4614 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4615 if (!Stripped && this == &AA) {
4616 // TODO: Look at the instruction and check recursively.
4617 
4618 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4619 << "\n");
4620 return false;
4621 }
4622 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4623 };
4624 
4625 bool Dummy = false;
4626 if (!genericValueTraversal<AAValueSimplify, bool>(
4627 A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4628 /* UseValueSimplify */ false))
4629 if (!askSimplifiedValueForAAValueConstantRange(A))
4630 return indicatePessimisticFixpoint();
4631 
4632 // If a candidate was found in this update, return CHANGED.
4633 
4634 return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4635 ? ChangeStatus::UNCHANGED
4636 : ChangeStatus::CHANGED;
4637 }
4638 
4639 /// See AbstractAttribute::trackStatistics()
4640 void trackStatistics() const override {
4641 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4642 }
4643 };
4644 
4645 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4646 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4647 : AAValueSimplifyImpl(IRP, A) {}
4648 
4649 /// See AbstractAttribute::initialize(...).
4650 void initialize(Attributor &A) override {
4651 SimplifiedAssociatedValue = &getAnchorValue();
4652 indicateOptimisticFixpoint();
4653 }
4654 /// See AbstractAttribute::updateImpl(...).
4655 ChangeStatus updateImpl(Attributor &A) override {
4656 llvm_unreachable(
4657 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4658 }
4659 /// See AbstractAttribute::trackStatistics()
4660 void trackStatistics() const override {
4661 STATS_DECLTRACK_FN_ATTR(value_simplify)
4662 }
4663 };
4664 
4665 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4666 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4667 : AAValueSimplifyFunction(IRP, A) {}
4668 /// See AbstractAttribute::trackStatistics()
4669 void trackStatistics() const override {
4670 STATS_DECLTRACK_CS_ATTR(value_simplify)
4671 }
4672 };
4673 
4674 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4675 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4676 : AAValueSimplifyReturned(IRP, A) {}
4677 
4678 /// See AbstractAttribute::manifest(...).
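/// A hypothetical sketch of the effect: if every value returned by the
/// callee is known to simplify to the same constant, e.g., `i32 42`, then a
/// call like `%r = call i32 @callee()` can have the uses of `%r` rewritten
/// to use `i32 42` directly. The names `%r` and `@callee` are illustrative
/// only.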
4679 ChangeStatus manifest(Attributor &A) override { 4680 return AAValueSimplifyImpl::manifest(A); 4681 } 4682 4683 void trackStatistics() const override { 4684 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 4685 } 4686 }; 4687 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 4688 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 4689 : AAValueSimplifyFloating(IRP, A) {} 4690 4691 void trackStatistics() const override { 4692 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 4693 } 4694 }; 4695 4696 /// ----------------------- Heap-To-Stack Conversion --------------------------- 4697 struct AAHeapToStackImpl : public AAHeapToStack { 4698 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) 4699 : AAHeapToStack(IRP, A) {} 4700 4701 const std::string getAsStr() const override { 4702 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); 4703 } 4704 4705 ChangeStatus manifest(Attributor &A) override { 4706 assert(getState().isValidState() && 4707 "Attempted to manifest an invalid state!"); 4708 4709 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 4710 Function *F = getAnchorScope(); 4711 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4712 4713 for (Instruction *MallocCall : MallocCalls) { 4714 // This malloc cannot be replaced. 4715 if (BadMallocCalls.count(MallocCall)) 4716 continue; 4717 4718 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { 4719 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 4720 A.deleteAfterManifest(*FreeCall); 4721 HasChanged = ChangeStatus::CHANGED; 4722 } 4723 4724 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall 4725 << "\n"); 4726 4727 MaybeAlign Alignment; 4728 Constant *Size; 4729 if (isCallocLikeFn(MallocCall, TLI)) { 4730 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); 4731 auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); 4732 APInt TotalSize = SizeT->getValue() * Num->getValue(); 4733 Size = 4734 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); 4735 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { 4736 Size = cast<ConstantInt>(MallocCall->getOperand(1)); 4737 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) 4738 ->getValue() 4739 .getZExtValue()); 4740 } else { 4741 Size = cast<ConstantInt>(MallocCall->getOperand(0)); 4742 } 4743 4744 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); 4745 Instruction *AI = 4746 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 4747 "", MallocCall->getNextNode()); 4748 4749 if (AI->getType() != MallocCall->getType()) 4750 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", 4751 AI->getNextNode()); 4752 4753 A.changeValueAfterManifest(*MallocCall, *AI); 4754 4755 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { 4756 auto *NBB = II->getNormalDest(); 4757 BranchInst::Create(NBB, MallocCall->getParent()); 4758 A.deleteAfterManifest(*MallocCall); 4759 } else { 4760 A.deleteAfterManifest(*MallocCall); 4761 } 4762 4763 // Zero out the allocated memory if it was a calloc. 
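// A rough sketch of the IR this creates for a converted calloc (names are
// illustrative and an i64 size type is assumed):
//   %calloc_bc = bitcast <new alloca> to i8*
//   call void @llvm.memset.p0i8.i64(i8* %calloc_bc, i8 0, i64 <TotalSize>, i1 false)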
4764 if (isCallocLikeFn(MallocCall, TLI)) {
4765 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4766 AI->getNextNode());
4767 Value *Ops[] = {
4768 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4769 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4770 
4771 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4772 Module *M = F->getParent();
4773 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4774 CallInst::Create(Fn, Ops, "", BI->getNextNode());
4775 }
4776 HasChanged = ChangeStatus::CHANGED;
4777 }
4778 
4779 return HasChanged;
4780 }
4781 
4782 /// Collection of all malloc calls in a function.
4783 SmallSetVector<Instruction *, 4> MallocCalls;
4784 
4785 /// Collection of malloc calls that cannot be converted.
4786 DenseSet<const Instruction *> BadMallocCalls;
4787 
4788 /// A map for each malloc call to the set of associated free calls.
4789 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4790 
4791 ChangeStatus updateImpl(Attributor &A) override;
4792 };
4793 
4794 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4795 const Function *F = getAnchorScope();
4796 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4797 
4798 MustBeExecutedContextExplorer &Explorer =
4799 A.getInfoCache().getMustBeExecutedContextExplorer();
4800 
4801 auto FreeCheck = [&](Instruction &I) {
4802 const auto &Frees = FreesForMalloc.lookup(&I);
4803 if (Frees.size() != 1)
4804 return false;
4805 Instruction *UniqueFree = *Frees.begin();
4806 return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4807 };
4808 
4809 auto UsesCheck = [&](Instruction &I) {
4810 bool ValidUsesOnly = true;
4811 bool MustUse = true;
4812 auto Pred = [&](const Use &U, bool &Follow) -> bool {
4813 Instruction *UserI = cast<Instruction>(U.getUser());
4814 if (isa<LoadInst>(UserI))
4815 return true;
4816 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4817 if (SI->getValueOperand() == U.get()) {
4818 LLVM_DEBUG(dbgs()
4819 << "[H2S] escaping store to memory: " << *UserI << "\n");
4820 ValidUsesOnly = false;
4821 } else {
4822 // A store into the malloc'ed memory is fine.
4823 }
4824 return true;
4825 }
4826 if (auto *CB = dyn_cast<CallBase>(UserI)) {
4827 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4828 return true;
4829 // Record the free call associated with this malloc.
4830 if (isFreeCall(UserI, TLI)) {
4831 if (MustUse) {
4832 FreesForMalloc[&I].insert(UserI);
4833 } else {
4834 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4835 << *UserI << "\n");
4836 ValidUsesOnly = false;
4837 }
4838 return true;
4839 }
4840 
4841 unsigned ArgNo = CB->getArgOperandNo(&U);
4842 
4843 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4844 *this, IRPosition::callsite_argument(*CB, ArgNo));
4845 
4846 // If a callsite argument use is nofree, we are fine.
4847 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4848 *this, IRPosition::callsite_argument(*CB, ArgNo));
4849 
4850 if (!NoCaptureAA.isAssumedNoCapture() ||
4851 !ArgNoFreeAA.isAssumedNoFree()) {
4852 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4853 ValidUsesOnly = false;
4854 }
4855 return true;
4856 }
4857 
4858 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4859 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4860 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4861 Follow = true;
4862 return true;
4863 }
4864 // Unknown user for which we cannot track uses further (in a way that
4865 // makes sense).
4866 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 4867 ValidUsesOnly = false; 4868 return true; 4869 }; 4870 A.checkForAllUses(Pred, *this, I); 4871 return ValidUsesOnly; 4872 }; 4873 4874 auto MallocCallocCheck = [&](Instruction &I) { 4875 if (BadMallocCalls.count(&I)) 4876 return true; 4877 4878 bool IsMalloc = isMallocLikeFn(&I, TLI); 4879 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); 4880 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); 4881 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { 4882 BadMallocCalls.insert(&I); 4883 return true; 4884 } 4885 4886 if (IsMalloc) { 4887 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) 4888 if (Size->getValue().ule(MaxHeapToStackSize)) 4889 if (UsesCheck(I) || FreeCheck(I)) { 4890 MallocCalls.insert(&I); 4891 return true; 4892 } 4893 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { 4894 // Only if the alignment and sizes are constant. 4895 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4896 if (Size->getValue().ule(MaxHeapToStackSize)) 4897 if (UsesCheck(I) || FreeCheck(I)) { 4898 MallocCalls.insert(&I); 4899 return true; 4900 } 4901 } else if (IsCalloc) { 4902 bool Overflow = false; 4903 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) 4904 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4905 if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) 4906 .ule(MaxHeapToStackSize)) 4907 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { 4908 MallocCalls.insert(&I); 4909 return true; 4910 } 4911 } 4912 4913 BadMallocCalls.insert(&I); 4914 return true; 4915 }; 4916 4917 size_t NumBadMallocs = BadMallocCalls.size(); 4918 4919 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); 4920 4921 if (NumBadMallocs != BadMallocCalls.size()) 4922 return ChangeStatus::CHANGED; 4923 4924 return ChangeStatus::UNCHANGED; 4925 } 4926 4927 struct AAHeapToStackFunction final : public AAHeapToStackImpl { 4928 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 4929 : AAHeapToStackImpl(IRP, A) {} 4930 4931 /// See AbstractAttribute::trackStatistics(). 4932 void trackStatistics() const override { 4933 STATS_DECL( 4934 MallocCalls, Function, 4935 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 4936 for (auto *C : MallocCalls) 4937 if (!BadMallocCalls.count(C)) 4938 ++BUILD_STAT_NAME(MallocCalls, Function); 4939 } 4940 }; 4941 4942 /// ----------------------- Privatizable Pointers ------------------------------ 4943 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 4944 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 4945 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 4946 4947 ChangeStatus indicatePessimisticFixpoint() override { 4948 AAPrivatizablePtr::indicatePessimisticFixpoint(); 4949 PrivatizableType = nullptr; 4950 return ChangeStatus::CHANGED; 4951 } 4952 4953 /// Identify the type we can chose for a private copy of the underlying 4954 /// argument. None means it is not clear yet, nullptr means there is none. 4955 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 4956 4957 /// Return a privatizable type that encloses both T0 and T1. 4958 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
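/// For example (purely illustrative): combining None with i32 yields i32,
/// i32 with i32 yields i32, and i32 with i64 yields nullptr because the two
/// call sites disagree on a privatizable type.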
4959 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 4960 if (!T0.hasValue()) 4961 return T1; 4962 if (!T1.hasValue()) 4963 return T0; 4964 if (T0 == T1) 4965 return T0; 4966 return nullptr; 4967 } 4968 4969 Optional<Type *> getPrivatizableType() const override { 4970 return PrivatizableType; 4971 } 4972 4973 const std::string getAsStr() const override { 4974 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 4975 } 4976 4977 protected: 4978 Optional<Type *> PrivatizableType; 4979 }; 4980 4981 // TODO: Do this for call site arguments (probably also other values) as well. 4982 4983 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 4984 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 4985 : AAPrivatizablePtrImpl(IRP, A) {} 4986 4987 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 4988 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 4989 // If this is a byval argument and we know all the call sites (so we can 4990 // rewrite them), there is no need to check them explicitly. 4991 bool AllCallSitesKnown; 4992 if (getIRPosition().hasAttr(Attribute::ByVal) && 4993 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 4994 true, AllCallSitesKnown)) 4995 return getAssociatedValue().getType()->getPointerElementType(); 4996 4997 Optional<Type *> Ty; 4998 unsigned ArgNo = getIRPosition().getArgNo(); 4999 5000 // Make sure the associated call site argument has the same type at all call 5001 // sites and it is an allocation we know is safe to privatize, for now that 5002 // means we only allow alloca instructions. 5003 // TODO: We can additionally analyze the accesses in the callee to create 5004 // the type from that information instead. That is a little more 5005 // involved and will be done in a follow up patch. 5006 auto CallSiteCheck = [&](AbstractCallSite ACS) { 5007 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 5008 // Check if a coresponding argument was found or if it is one not 5009 // associated (which can happen for callback calls). 5010 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 5011 return false; 5012 5013 // Check that all call sites agree on a type. 5014 auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); 5015 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 5016 5017 LLVM_DEBUG({ 5018 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 5019 if (CSTy.hasValue() && CSTy.getValue()) 5020 CSTy.getValue()->print(dbgs()); 5021 else if (CSTy.hasValue()) 5022 dbgs() << "<nullptr>"; 5023 else 5024 dbgs() << "<none>"; 5025 }); 5026 5027 Ty = combineTypes(Ty, CSTy); 5028 5029 LLVM_DEBUG({ 5030 dbgs() << " : New Type: "; 5031 if (Ty.hasValue() && Ty.getValue()) 5032 Ty.getValue()->print(dbgs()); 5033 else if (Ty.hasValue()) 5034 dbgs() << "<nullptr>"; 5035 else 5036 dbgs() << "<none>"; 5037 dbgs() << "\n"; 5038 }); 5039 5040 return !Ty.hasValue() || Ty.getValue(); 5041 }; 5042 5043 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) 5044 return nullptr; 5045 return Ty; 5046 } 5047 5048 /// See AbstractAttribute::updateImpl(...). 
5049 ChangeStatus updateImpl(Attributor &A) override { 5050 PrivatizableType = identifyPrivatizableType(A); 5051 if (!PrivatizableType.hasValue()) 5052 return ChangeStatus::UNCHANGED; 5053 if (!PrivatizableType.getValue()) 5054 return indicatePessimisticFixpoint(); 5055 5056 // The dependence is optional so we don't give up once we give up on the 5057 // alignment. 5058 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 5059 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5060 5061 // Avoid arguments with padding for now. 5062 if (!getIRPosition().hasAttr(Attribute::ByVal) && 5063 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 5064 A.getInfoCache().getDL())) { 5065 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 5066 return indicatePessimisticFixpoint(); 5067 } 5068 5069 // Verify callee and caller agree on how the promoted argument would be 5070 // passed. 5071 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a 5072 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible 5073 // which doesn't require the arguments ArgumentPromotion wanted to pass. 5074 Function &Fn = *getIRPosition().getAnchorScope(); 5075 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; 5076 ArgsToPromote.insert(getAssociatedArgument()); 5077 const auto *TTI = 5078 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 5079 if (!TTI || 5080 !ArgumentPromotionPass::areFunctionArgsABICompatible( 5081 Fn, *TTI, ArgsToPromote, Dummy) || 5082 ArgsToPromote.empty()) { 5083 LLVM_DEBUG( 5084 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 5085 << Fn.getName() << "\n"); 5086 return indicatePessimisticFixpoint(); 5087 } 5088 5089 // Collect the types that will replace the privatizable type in the function 5090 // signature. 5091 SmallVector<Type *, 16> ReplacementTypes; 5092 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5093 5094 // Register a rewrite of the argument. 5095 Argument *Arg = getAssociatedArgument(); 5096 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 5097 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 5098 return indicatePessimisticFixpoint(); 5099 } 5100 5101 unsigned ArgNo = Arg->getArgNo(); 5102 5103 // Helper to check if for the given call site the associated argument is 5104 // passed to a callback where the privatization would be different. 
5105 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5106 SmallVector<const Use *, 4> CallbackUses;
5107 AbstractCallSite::getCallbackUses(CB, CallbackUses);
5108 for (const Use *U : CallbackUses) {
5109 AbstractCallSite CBACS(U);
5110 assert(CBACS && CBACS.isCallbackCall());
5111 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5112 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5113 
5114 LLVM_DEBUG({
5115 dbgs()
5116 << "[AAPrivatizablePtr] Argument " << *Arg
5117 << " check if it can be privatized in the context of its parent ("
5118 << Arg->getParent()->getName()
5119 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5120 "callback ("
5121 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5122 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5123 << CBACS.getCallArgOperand(CBArg) << " vs "
5124 << CB.getArgOperand(ArgNo) << "\n"
5125 << "[AAPrivatizablePtr] " << CBArg << " : "
5126 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5127 });
5128 
5129 if (CBArgNo != int(ArgNo))
5130 continue;
5131 const auto &CBArgPrivAA =
5132 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5133 if (CBArgPrivAA.isValidState()) {
5134 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5135 if (!CBArgPrivTy.hasValue())
5136 continue;
5137 if (CBArgPrivTy.getValue() == PrivatizableType)
5138 continue;
5139 }
5140 
5141 LLVM_DEBUG({
5142 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5143 << " cannot be privatized in the context of its parent ("
5144 << Arg->getParent()->getName()
5145 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5146 "callback ("
5147 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5148 << ").\n[AAPrivatizablePtr] for which the argument "
5149 "privatization is not compatible.\n";
5150 });
5151 return false;
5152 }
5153 }
5154 return true;
5155 };
5156 
5157 // Helper to check if, for the given call site, the associated argument is
5158 // passed to a direct call where the privatization would be different.
5159 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5160 CallBase *DC = cast<CallBase>(ACS.getInstruction());
5161 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5162 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5163 "Expected a direct call operand for callback call operand");
5164 
5165 LLVM_DEBUG({
5166 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5167 << " check if it can be privatized in the context of its parent ("
5168 << Arg->getParent()->getName()
5169 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5170 "direct call of ("
5171 << DCArgNo << "@" << DC->getCalledFunction()->getName()
5172 << ").\n";
5173 });
5174 
5175 Function *DCCallee = DC->getCalledFunction();
5176 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5177 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5178 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5179 if (DCArgPrivAA.isValidState()) {
5180 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5181 if (!DCArgPrivTy.hasValue())
5182 return true;
5183 if (DCArgPrivTy.getValue() == PrivatizableType)
5184 return true;
5185 }
5186 }
5187 
5188 LLVM_DEBUG({
5189 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5190 << " cannot be privatized in the context of its parent ("
5191 << Arg->getParent()->getName()
5192 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5193 "direct call of ("
5194 << ACS.getInstruction()->getCalledFunction()->getName()
5195 << ").\n[AAPrivatizablePtr] for which the argument "
5196 "privatization is not compatible.\n";
5197 });
5198 return false;
5199 };
5200 
5201 // Helper to check if the associated argument is used at the given abstract
5202 // call site in a way that is incompatible with the privatization assumed
5203 // here.
5204 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5205 if (ACS.isDirectCall())
5206 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5207 if (ACS.isCallbackCall())
5208 return IsCompatiblePrivArgOfDirectCS(ACS);
5209 return false;
5210 };
5211 
5212 bool AllCallSitesKnown;
5213 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5214 AllCallSitesKnown))
5215 return indicatePessimisticFixpoint();
5216 
5217 return ChangeStatus::UNCHANGED;
5218 }
5219 
5220 /// Given a type to privatize \p PrivType, collect the constituents (which are
5221 /// used) in \p ReplacementTypes.
5222 static void
5223 identifyReplacementTypes(Type *PrivType,
5224 SmallVectorImpl<Type *> &ReplacementTypes) {
5225 // TODO: For now we expand the privatization type to the fullest which can
5226 // lead to dead arguments that need to be removed later.
5227 assert(PrivType && "Expected privatizable type!");
5228 
5229 // Traverse the type, extract constituent types on the outermost level.
5230 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5231 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5232 ReplacementTypes.push_back(PrivStructType->getElementType(u));
5233 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5234 ReplacementTypes.append(PrivArrayType->getNumElements(),
5235 PrivArrayType->getElementType());
5236 } else {
5237 ReplacementTypes.push_back(PrivType);
5238 }
5239 }
5240 
5241 /// Initialize \p Base according to the type \p PrivType at position \p IP.
5242 /// The values needed are taken from the arguments of \p F starting at
5243 /// position \p ArgNo.
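/// A hypothetical example: for \p PrivType == { i32, i64 } and \p ArgNo == 1,
/// this emits two stores that copy the second and third arguments of \p F
/// into the first and second struct element of \p Base, at the offsets given
/// by the struct layout (the pointer arithmetic is produced by
/// constructPointer).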
5244 static void createInitialization(Type *PrivType, Value &Base, Function &F, 5245 unsigned ArgNo, Instruction &IP) { 5246 assert(PrivType && "Expected privatizable type!"); 5247 5248 IRBuilder<NoFolder> IRB(&IP); 5249 const DataLayout &DL = F.getParent()->getDataLayout(); 5250 5251 // Traverse the type, build GEPs and stores. 5252 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5253 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5254 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5255 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 5256 Value *Ptr = constructPointer( 5257 PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL); 5258 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5259 } 5260 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5261 Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo(); 5262 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy); 5263 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5264 Value *Ptr = 5265 constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL); 5266 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5267 } 5268 } else { 5269 new StoreInst(F.getArg(ArgNo), &Base, &IP); 5270 } 5271 } 5272 5273 /// Extract values from \p Base according to the type \p PrivType at the 5274 /// call position \p ACS. The values are appended to \p ReplacementValues. 5275 void createReplacementValues(Align Alignment, Type *PrivType, 5276 AbstractCallSite ACS, Value *Base, 5277 SmallVectorImpl<Value *> &ReplacementValues) { 5278 assert(Base && "Expected base value!"); 5279 assert(PrivType && "Expected privatizable type!"); 5280 Instruction *IP = ACS.getInstruction(); 5281 5282 IRBuilder<NoFolder> IRB(IP); 5283 const DataLayout &DL = IP->getModule()->getDataLayout(); 5284 5285 if (Base->getType()->getPointerElementType() != PrivType) 5286 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 5287 "", ACS.getInstruction()); 5288 5289 // Traverse the type, build GEPs and loads. 5290 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5291 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5292 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5293 Type *PointeeTy = PrivStructType->getElementType(u); 5294 Value *Ptr = 5295 constructPointer(PointeeTy->getPointerTo(), Base, 5296 PrivStructLayout->getElementOffset(u), IRB, DL); 5297 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 5298 L->setAlignment(Alignment); 5299 ReplacementValues.push_back(L); 5300 } 5301 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5302 Type *PointeeTy = PrivArrayType->getElementType(); 5303 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 5304 Type *PointeePtrTy = PointeeTy->getPointerTo(); 5305 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5306 Value *Ptr = 5307 constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL); 5308 LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP); 5309 L->setAlignment(Alignment); 5310 ReplacementValues.push_back(L); 5311 } 5312 } else { 5313 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 5314 L->setAlignment(Alignment); 5315 ReplacementValues.push_back(L); 5316 } 5317 } 5318 5319 /// See AbstractAttribute::manifest(...) 
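/// A hypothetical end-to-end example: privatizing a pointer argument whose
/// privatizable type is { i32, i64 } replaces it with two scalar arguments
/// (i32, i64); each rewritten call site loads the two elements and passes
/// them, and the new function entry allocates a private { i32, i64 } that is
/// initialized from the incoming scalars before all other uses.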
5320 ChangeStatus manifest(Attributor &A) override { 5321 if (!PrivatizableType.hasValue()) 5322 return ChangeStatus::UNCHANGED; 5323 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 5324 5325 // Collect all tail calls in the function as we cannot allow new allocas to 5326 // escape into tail recursion. 5327 // TODO: Be smarter about new allocas escaping into tail calls. 5328 SmallVector<CallInst *, 16> TailCalls; 5329 if (!A.checkForAllInstructions( 5330 [&](Instruction &I) { 5331 CallInst &CI = cast<CallInst>(I); 5332 if (CI.isTailCall()) 5333 TailCalls.push_back(&CI); 5334 return true; 5335 }, 5336 *this, {Instruction::Call})) 5337 return ChangeStatus::UNCHANGED; 5338 5339 Argument *Arg = getAssociatedArgument(); 5340 // Query AAAlign attribute for alignment of associated argument to 5341 // determine the best alignment of loads. 5342 const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg)); 5343 5344 // Callback to repair the associated function. A new alloca is placed at the 5345 // beginning and initialized with the values passed through arguments. The 5346 // new alloca replaces the use of the old pointer argument. 5347 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 5348 [=](const Attributor::ArgumentReplacementInfo &ARI, 5349 Function &ReplacementFn, Function::arg_iterator ArgIt) { 5350 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 5351 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 5352 auto *AI = new AllocaInst(PrivatizableType.getValue(), 0, 5353 Arg->getName() + ".priv", IP); 5354 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 5355 ArgIt->getArgNo(), *IP); 5356 Arg->replaceAllUsesWith(AI); 5357 5358 for (CallInst *CI : TailCalls) 5359 CI->setTailCall(false); 5360 }; 5361 5362 // Callback to repair a call site of the associated function. The elements 5363 // of the privatizable type are loaded prior to the call and passed to the 5364 // new function version. 5365 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 5366 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 5367 AbstractCallSite ACS, 5368 SmallVectorImpl<Value *> &NewArgOperands) { 5369 // When no alignment is specified for the load instruction, 5370 // natural alignment is assumed. 5371 createReplacementValues( 5372 assumeAligned(AlignAA.getAssumedAlign()), 5373 PrivatizableType.getValue(), ACS, 5374 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 5375 NewArgOperands); 5376 }; 5377 5378 // Collect the types that will replace the privatizable type in the function 5379 // signature. 5380 SmallVector<Type *, 16> ReplacementTypes; 5381 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5382 5383 // Register a rewrite of the argument. 5384 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 5385 std::move(FnRepairCB), 5386 std::move(ACSRepairCB))) 5387 return ChangeStatus::CHANGED; 5388 return ChangeStatus::UNCHANGED; 5389 } 5390 5391 /// See AbstractAttribute::trackStatistics() 5392 void trackStatistics() const override { 5393 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 5394 } 5395 }; 5396 5397 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 5398 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 5399 : AAPrivatizablePtrImpl(IRP, A) {} 5400 5401 /// See AbstractAttribute::initialize(...). 5402 virtual void initialize(Attributor &A) override { 5403 // TODO: We can privatize more than arguments. 
5404 indicatePessimisticFixpoint(); 5405 } 5406 5407 ChangeStatus updateImpl(Attributor &A) override { 5408 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 5409 "updateImpl will not be called"); 5410 } 5411 5412 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5413 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5414 Value *Obj = 5415 GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL()); 5416 if (!Obj) { 5417 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 5418 return nullptr; 5419 } 5420 5421 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 5422 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 5423 if (CI->isOne()) 5424 return Obj->getType()->getPointerElementType(); 5425 if (auto *Arg = dyn_cast<Argument>(Obj)) { 5426 auto &PrivArgAA = 5427 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg)); 5428 if (PrivArgAA.isAssumedPrivatizablePtr()) 5429 return Obj->getType()->getPointerElementType(); 5430 } 5431 5432 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 5433 "alloca nor privatizable argument: " 5434 << *Obj << "!\n"); 5435 return nullptr; 5436 } 5437 5438 /// See AbstractAttribute::trackStatistics() 5439 void trackStatistics() const override { 5440 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 5441 } 5442 }; 5443 5444 struct AAPrivatizablePtrCallSiteArgument final 5445 : public AAPrivatizablePtrFloating { 5446 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 5447 : AAPrivatizablePtrFloating(IRP, A) {} 5448 5449 /// See AbstractAttribute::initialize(...). 5450 void initialize(Attributor &A) override { 5451 if (getIRPosition().hasAttr(Attribute::ByVal)) 5452 indicateOptimisticFixpoint(); 5453 } 5454 5455 /// See AbstractAttribute::updateImpl(...). 5456 ChangeStatus updateImpl(Attributor &A) override { 5457 PrivatizableType = identifyPrivatizableType(A); 5458 if (!PrivatizableType.hasValue()) 5459 return ChangeStatus::UNCHANGED; 5460 if (!PrivatizableType.getValue()) 5461 return indicatePessimisticFixpoint(); 5462 5463 const IRPosition &IRP = getIRPosition(); 5464 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP); 5465 if (!NoCaptureAA.isAssumedNoCapture()) { 5466 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 5467 return indicatePessimisticFixpoint(); 5468 } 5469 5470 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP); 5471 if (!NoAliasAA.isAssumedNoAlias()) { 5472 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 5473 return indicatePessimisticFixpoint(); 5474 } 5475 5476 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP); 5477 if (!MemBehaviorAA.isAssumedReadOnly()) { 5478 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 5479 return indicatePessimisticFixpoint(); 5480 } 5481 5482 return ChangeStatus::UNCHANGED; 5483 } 5484 5485 /// See AbstractAttribute::trackStatistics() 5486 void trackStatistics() const override { 5487 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 5488 } 5489 }; 5490 5491 struct AAPrivatizablePtrCallSiteReturned final 5492 : public AAPrivatizablePtrFloating { 5493 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 5494 : AAPrivatizablePtrFloating(IRP, A) {} 5495 5496 /// See AbstractAttribute::initialize(...). 5497 void initialize(Attributor &A) override { 5498 // TODO: We can privatize more than arguments. 
5499 indicatePessimisticFixpoint(); 5500 } 5501 5502 /// See AbstractAttribute::trackStatistics() 5503 void trackStatistics() const override { 5504 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 5505 } 5506 }; 5507 5508 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 5509 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 5510 : AAPrivatizablePtrFloating(IRP, A) {} 5511 5512 /// See AbstractAttribute::initialize(...). 5513 void initialize(Attributor &A) override { 5514 // TODO: We can privatize more than arguments. 5515 indicatePessimisticFixpoint(); 5516 } 5517 5518 /// See AbstractAttribute::trackStatistics() 5519 void trackStatistics() const override { 5520 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 5521 } 5522 }; 5523 5524 /// -------------------- Memory Behavior Attributes ---------------------------- 5525 /// Includes read-none, read-only, and write-only. 5526 /// ---------------------------------------------------------------------------- 5527 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 5528 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 5529 : AAMemoryBehavior(IRP, A) {} 5530 5531 /// See AbstractAttribute::initialize(...). 5532 void initialize(Attributor &A) override { 5533 intersectAssumedBits(BEST_STATE); 5534 getKnownStateFromValue(getIRPosition(), getState()); 5535 IRAttribute::initialize(A); 5536 } 5537 5538 /// Return the memory behavior information encoded in the IR for \p IRP. 5539 static void getKnownStateFromValue(const IRPosition &IRP, 5540 BitIntegerState &State, 5541 bool IgnoreSubsumingPositions = false) { 5542 SmallVector<Attribute, 2> Attrs; 5543 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 5544 for (const Attribute &Attr : Attrs) { 5545 switch (Attr.getKindAsEnum()) { 5546 case Attribute::ReadNone: 5547 State.addKnownBits(NO_ACCESSES); 5548 break; 5549 case Attribute::ReadOnly: 5550 State.addKnownBits(NO_WRITES); 5551 break; 5552 case Attribute::WriteOnly: 5553 State.addKnownBits(NO_READS); 5554 break; 5555 default: 5556 llvm_unreachable("Unexpected attribute!"); 5557 } 5558 } 5559 5560 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 5561 if (!I->mayReadFromMemory()) 5562 State.addKnownBits(NO_READS); 5563 if (!I->mayWriteToMemory()) 5564 State.addKnownBits(NO_WRITES); 5565 } 5566 } 5567 5568 /// See AbstractAttribute::getDeducedAttributes(...). 5569 void getDeducedAttributes(LLVMContext &Ctx, 5570 SmallVectorImpl<Attribute> &Attrs) const override { 5571 assert(Attrs.size() == 0); 5572 if (isAssumedReadNone()) 5573 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 5574 else if (isAssumedReadOnly()) 5575 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 5576 else if (isAssumedWriteOnly()) 5577 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 5578 assert(Attrs.size() <= 1); 5579 } 5580 5581 /// See AbstractAttribute::manifest(...). 5582 ChangeStatus manifest(Attributor &A) override { 5583 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 5584 return ChangeStatus::UNCHANGED; 5585 5586 const IRPosition &IRP = getIRPosition(); 5587 5588 // Check if we would improve the existing attributes first. 
5589 SmallVector<Attribute, 4> DeducedAttrs; 5590 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 5591 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 5592 return IRP.hasAttr(Attr.getKindAsEnum(), 5593 /* IgnoreSubsumingPositions */ true); 5594 })) 5595 return ChangeStatus::UNCHANGED; 5596 5597 // Clear existing attributes. 5598 IRP.removeAttrs(AttrKinds); 5599 5600 // Use the generic manifest method. 5601 return IRAttribute::manifest(A); 5602 } 5603 5604 /// See AbstractState::getAsStr(). 5605 const std::string getAsStr() const override { 5606 if (isAssumedReadNone()) 5607 return "readnone"; 5608 if (isAssumedReadOnly()) 5609 return "readonly"; 5610 if (isAssumedWriteOnly()) 5611 return "writeonly"; 5612 return "may-read/write"; 5613 } 5614 5615 /// The set of IR attributes AAMemoryBehavior deals with. 5616 static const Attribute::AttrKind AttrKinds[3]; 5617 }; 5618 5619 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 5620 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 5621 5622 /// Memory behavior attribute for a floating value. 5623 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 5624 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 5625 : AAMemoryBehaviorImpl(IRP, A) {} 5626 5627 /// See AbstractAttribute::initialize(...). 5628 void initialize(Attributor &A) override { 5629 AAMemoryBehaviorImpl::initialize(A); 5630 // Initialize the use vector with all direct uses of the associated value. 5631 for (const Use &U : getAssociatedValue().uses()) 5632 Uses.insert(&U); 5633 } 5634 5635 /// See AbstractAttribute::updateImpl(...). 5636 ChangeStatus updateImpl(Attributor &A) override; 5637 5638 /// See AbstractAttribute::trackStatistics() 5639 void trackStatistics() const override { 5640 if (isAssumedReadNone()) 5641 STATS_DECLTRACK_FLOATING_ATTR(readnone) 5642 else if (isAssumedReadOnly()) 5643 STATS_DECLTRACK_FLOATING_ATTR(readonly) 5644 else if (isAssumedWriteOnly()) 5645 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 5646 } 5647 5648 private: 5649 /// Return true if users of \p UserI might access the underlying 5650 /// variable/location described by \p U and should therefore be analyzed. 5651 bool followUsersOfUseIn(Attributor &A, const Use *U, 5652 const Instruction *UserI); 5653 5654 /// Update the state according to the effect of use \p U in \p UserI. 5655 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI); 5656 5657 protected: 5658 /// Container for (transitive) uses of the associated argument. 5659 SetVector<const Use *> Uses; 5660 }; 5661 5662 /// Memory behavior attribute for function argument. 5663 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 5664 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 5665 : AAMemoryBehaviorFloating(IRP, A) {} 5666 5667 /// See AbstractAttribute::initialize(...). 5668 void initialize(Attributor &A) override { 5669 intersectAssumedBits(BEST_STATE); 5670 const IRPosition &IRP = getIRPosition(); 5671 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 5672 // can query it when we use has/getAttr. That would allow us to reuse the 5673 // initialize of the base class here. 5674 bool HasByVal = 5675 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 5676 getKnownStateFromValue(IRP, getState(), 5677 /* IgnoreSubsumingPositions */ HasByVal); 5678 5679 // Initialize the use vector with all direct uses of the associated value. 
5680 Argument *Arg = getAssociatedArgument();
5681 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5682 indicatePessimisticFixpoint();
5683 } else {
5684 // Initialize the use vector with all direct uses of the associated value.
5685 for (const Use &U : Arg->uses())
5686 Uses.insert(&U);
5687 }
5688 }
5689 
5690 ChangeStatus manifest(Attributor &A) override {
5691 // TODO: Pointer arguments are not supported on vectors of pointers yet.
5692 if (!getAssociatedValue().getType()->isPointerTy())
5693 return ChangeStatus::UNCHANGED;
5694 
5695 // TODO: From readattrs.ll: "inalloca parameters are always
5696 // considered written"
5697 if (hasAttr({Attribute::InAlloca})) {
5698 removeKnownBits(NO_WRITES);
5699 removeAssumedBits(NO_WRITES);
5700 }
5701 return AAMemoryBehaviorFloating::manifest(A);
5702 }
5703 
5704 /// See AbstractAttribute::trackStatistics()
5705 void trackStatistics() const override {
5706 if (isAssumedReadNone())
5707 STATS_DECLTRACK_ARG_ATTR(readnone)
5708 else if (isAssumedReadOnly())
5709 STATS_DECLTRACK_ARG_ATTR(readonly)
5710 else if (isAssumedWriteOnly())
5711 STATS_DECLTRACK_ARG_ATTR(writeonly)
5712 }
5713 };
5714 
5715 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5716 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5717 : AAMemoryBehaviorArgument(IRP, A) {}
5718 
5719 /// See AbstractAttribute::initialize(...).
5720 void initialize(Attributor &A) override {
5721 if (Argument *Arg = getAssociatedArgument()) {
5722 if (Arg->hasByValAttr()) {
5723 addKnownBits(NO_WRITES);
5724 removeKnownBits(NO_READS);
5725 removeAssumedBits(NO_READS);
5726 }
5727 }
5728 AAMemoryBehaviorArgument::initialize(A);
5729 }
5730 
5731 /// See AbstractAttribute::updateImpl(...).
5732 ChangeStatus updateImpl(Attributor &A) override {
5733 // TODO: Once we have call site specific value information we can provide
5734 // call site specific liveness information and then it makes
5735 // sense to specialize attributes for call site arguments instead of
5736 // redirecting requests to the callee argument.
5737 Argument *Arg = getAssociatedArgument();
5738 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5739 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5740 return clampStateAndIndicateChange(
5741 getState(),
5742 static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5743 }
5744 
5745 /// See AbstractAttribute::trackStatistics()
5746 void trackStatistics() const override {
5747 if (isAssumedReadNone())
5748 STATS_DECLTRACK_CSARG_ATTR(readnone)
5749 else if (isAssumedReadOnly())
5750 STATS_DECLTRACK_CSARG_ATTR(readonly)
5751 else if (isAssumedWriteOnly())
5752 STATS_DECLTRACK_CSARG_ATTR(writeonly)
5753 }
5754 };
5755 
5756 /// Memory behavior attribute for a call site return position.
5757 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5758 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5759 : AAMemoryBehaviorFloating(IRP, A) {}
5760 
5761 /// See AbstractAttribute::manifest(...).
5762 ChangeStatus manifest(Attributor &A) override {
5763 // We do not annotate returned values.
5764 return ChangeStatus::UNCHANGED;
5765 }
5766 
5767 /// See AbstractAttribute::trackStatistics()
5768 void trackStatistics() const override {}
5769 };
5770 
5771 /// An AA to represent the memory behavior function attributes.
5772 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5773 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5774 : AAMemoryBehaviorImpl(IRP, A) {}
5775 
5776 /// See AbstractAttribute::updateImpl(Attributor &A).
5777 virtual ChangeStatus updateImpl(Attributor &A) override;
5778 
5779 /// See AbstractAttribute::manifest(...).
5780 ChangeStatus manifest(Attributor &A) override {
5781 Function &F = cast<Function>(getAnchorValue());
5782 if (isAssumedReadNone()) {
5783 F.removeFnAttr(Attribute::ArgMemOnly);
5784 F.removeFnAttr(Attribute::InaccessibleMemOnly);
5785 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5786 }
5787 return AAMemoryBehaviorImpl::manifest(A);
5788 }
5789 
5790 /// See AbstractAttribute::trackStatistics()
5791 void trackStatistics() const override {
5792 if (isAssumedReadNone())
5793 STATS_DECLTRACK_FN_ATTR(readnone)
5794 else if (isAssumedReadOnly())
5795 STATS_DECLTRACK_FN_ATTR(readonly)
5796 else if (isAssumedWriteOnly())
5797 STATS_DECLTRACK_FN_ATTR(writeonly)
5798 }
5799 };
5800 
5801 /// AAMemoryBehavior attribute for call sites.
5802 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5803 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5804 : AAMemoryBehaviorImpl(IRP, A) {}
5805 
5806 /// See AbstractAttribute::initialize(...).
5807 void initialize(Attributor &A) override {
5808 AAMemoryBehaviorImpl::initialize(A);
5809 Function *F = getAssociatedFunction();
5810 if (!F || !A.isFunctionIPOAmendable(*F)) {
5811 indicatePessimisticFixpoint();
5812 return;
5813 }
5814 }
5815 
5816 /// See AbstractAttribute::updateImpl(...).
5817 ChangeStatus updateImpl(Attributor &A) override {
5818 // TODO: Once we have call site specific value information we can provide
5819 // call site specific liveness information and then it makes
5820 // sense to specialize attributes for call site arguments instead of
5821 // redirecting requests to the callee argument.
5822 Function *F = getAssociatedFunction();
5823 const IRPosition &FnPos = IRPosition::function(*F);
5824 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5825 return clampStateAndIndicateChange(
5826 getState(),
5827 static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5828 }
5829 
5830 /// See AbstractAttribute::trackStatistics()
5831 void trackStatistics() const override {
5832 if (isAssumedReadNone())
5833 STATS_DECLTRACK_CS_ATTR(readnone)
5834 else if (isAssumedReadOnly())
5835 STATS_DECLTRACK_CS_ATTR(readonly)
5836 else if (isAssumedWriteOnly())
5837 STATS_DECLTRACK_CS_ATTR(writeonly)
5838 }
5839 };
5840 
5841 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5842 
5843 // The current assumed state used to determine a change.
5844 auto AssumedState = getAssumed();
5845 
5846 auto CheckRWInst = [&](Instruction &I) {
5847 // If the instruction has its own memory behavior state, use it to restrict
5848 // the local state. No further analysis is required as the other memory
5849 // state is as optimistic as it gets.
5850 if (const auto *CB = dyn_cast<CallBase>(&I)) {
5851 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5852 *this, IRPosition::callsite_function(*CB));
5853 intersectAssumedBits(MemBehaviorAA.getAssumed());
5854 return !isAtFixpoint();
5855 }
5856 
5857 // Remove access kind modifiers if necessary.
5858 if (I.mayReadFromMemory())
5859 removeAssumedBits(NO_READS);
5860 if (I.mayWriteToMemory())
5861 removeAssumedBits(NO_WRITES);
5862 return !isAtFixpoint();
5863 };
5864 
5865 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5866 return indicatePessimisticFixpoint();
5867 
5868 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5869 : ChangeStatus::UNCHANGED;
5870 }
5871 
5872 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5873 
5874 const IRPosition &IRP = getIRPosition();
5875 const IRPosition &FnPos = IRPosition::function_scope(IRP);
5876 AAMemoryBehavior::StateType &S = getState();
5877 
5878 // First, check the function scope. We take the known information and we avoid
5879 // work if the assumed information implies the current assumed information for
5880 // this attribute. This is valid for all but byval arguments.
5881 Argument *Arg = IRP.getAssociatedArgument();
5882 AAMemoryBehavior::base_t FnMemAssumedState =
5883 AAMemoryBehavior::StateType::getWorstState();
5884 if (!Arg || !Arg->hasByValAttr()) {
5885 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5886 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5887 FnMemAssumedState = FnMemAA.getAssumed();
5888 S.addKnownBits(FnMemAA.getKnown());
5889 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5890 return ChangeStatus::UNCHANGED;
5891 }
5892 
5893 // Make sure the value is not captured (except through "return"), if
5894 // it is, any information derived would be irrelevant anyway as we cannot
5895 // check the potential aliases introduced by the capture. However, no need
5896 // to fall back to anything less optimistic than the function state.
5897 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5898 *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5899 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5900 S.intersectAssumedBits(FnMemAssumedState);
5901 return ChangeStatus::CHANGED;
5902 }
5903 
5904 // The current assumed state used to determine a change.
5905 auto AssumedState = S.getAssumed();
5906 
5907 // Liveness information to exclude dead users.
5908 // TODO: Take the FnPos once we have call site specific liveness information.
5909 const auto &LivenessAA = A.getAAFor<AAIsDead>(
5910 *this, IRPosition::function(*IRP.getAssociatedFunction()),
5911 /* TrackDependence */ false);
5912 
5913 // Visit and expand uses until all are analyzed or a fixpoint is reached.
5914 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5915 const Use *U = Uses[i];
5916 Instruction *UserI = cast<Instruction>(U->getUser());
5917 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5918 << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5919 << "]\n");
5920 if (A.isAssumedDead(*U, this, &LivenessAA))
5921 continue;
5922 
5923 // Droppable users, e.g., llvm::assume, do not actually perform any action.
5924 if (UserI->isDroppable())
5925 continue;
5926 
5927 // Check if the users of UserI should also be visited.
5928 if (followUsersOfUseIn(A, U, UserI))
5929 for (const Use &UserIUse : UserI->uses())
5930 Uses.insert(&UserIUse);
5931 
5932 // If UserI might touch memory we analyze the use in detail.
5933 if (UserI->mayReadOrWriteMemory())
5934 analyzeUseIn(A, U, UserI);
5935 }
5936 
5937 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5938 : ChangeStatus::UNCHANGED;
5939 }
5940 
5941 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5942 const Instruction *UserI) {
5943 // The loaded value is unrelated to the pointer argument, no need to
5944 // follow the users of the load.
5945 if (isa<LoadInst>(UserI))
5946 return false;
5947 
5948 // By default we follow all uses assuming UserI might leak information on U,
5949 // we have special handling for call site operands though.
5950 const auto *CB = dyn_cast<CallBase>(UserI);
5951 if (!CB || !CB->isArgOperand(U))
5952 return true;
5953 
5954 // If the use is a call argument known not to be captured, the users of
5955 // the call do not need to be visited because they have to be unrelated to
5956 // the input. Note that this check is not trivial even though we disallow
5957 // general capturing of the underlying argument. The reason is that the
5958 // call might capture the argument "through return", which we allow and for
5959 // which we need to check call users.
5960 if (U->get()->getType()->isPointerTy()) {
5961 unsigned ArgNo = CB->getArgOperandNo(U);
5962 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5963 *this, IRPosition::callsite_argument(*CB, ArgNo),
5964 /* TrackDependence */ true, DepClassTy::OPTIONAL);
5965 return !ArgNoCaptureAA.isAssumedNoCapture();
5966 }
5967 
5968 return true;
5969 }
5970 
5971 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5972 const Instruction *UserI) {
5973 assert(UserI->mayReadOrWriteMemory());
5974 
5975 switch (UserI->getOpcode()) {
5976 default:
5977 // TODO: Handle all atomics and other side-effect operations we know of.
5978 break;
5979 case Instruction::Load:
5980 // Loads cause the NO_READS property to disappear.
5981 removeAssumedBits(NO_READS);
5982 return;
5983 
5984 case Instruction::Store:
5985 // Stores cause the NO_WRITES property to disappear if the use is the
5986 // pointer operand. Note that we do assume that capturing was taken care of
5987 // somewhere else.
5988 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5989 removeAssumedBits(NO_WRITES);
5990 return;
5991 
5992 case Instruction::Call:
5993 case Instruction::CallBr:
5994 case Instruction::Invoke: {
5995 // For call sites we look at the argument memory behavior attribute (this
5996 // could be recursive!) in order to restrict our own state.
5997 const auto *CB = cast<CallBase>(UserI);
5998 
5999 // Give up on operand bundles.
6000 if (CB->isBundleOperand(U)) {
6001 indicatePessimisticFixpoint();
6002 return;
6003 }
6004 
6005 // Calling a function does read the function pointer, maybe write it if the
6006 // function is self-modifying.
6007 if (CB->isCallee(U)) {
6008 removeAssumedBits(NO_READS);
6009 break;
6010 }
6011 
6012 // Adjust the possible access behavior based on the information on the
6013 // argument.
6014 IRPosition Pos;
6015 if (U->get()->getType()->isPointerTy())
6016 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6017 else
6018 Pos = IRPosition::callsite_function(*CB);
6019 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6020 *this, Pos,
6021 /* TrackDependence */ true, DepClassTy::OPTIONAL);
6022 // "assumed" has at most the same bits as the MemBehaviorAA assumed
6023 // and at least "known".
6024 intersectAssumedBits(MemBehaviorAA.getAssumed());
6025 return;
6026 }
6027 };
6028 
6029 // Generally, look at the "may-properties" and adjust the assumed state if we
6030 // did not trigger special handling before.
6031 if (UserI->mayReadFromMemory()) 6032 removeAssumedBits(NO_READS); 6033 if (UserI->mayWriteToMemory()) 6034 removeAssumedBits(NO_WRITES); 6035 } 6036 6037 } // namespace 6038 6039 /// -------------------- Memory Locations Attributes --------------------------- 6040 /// Includes read-none, argmemonly, inaccessiblememonly, 6041 /// inaccessiblememorargmemonly 6042 /// ---------------------------------------------------------------------------- 6043 6044 std::string AAMemoryLocation::getMemoryLocationsAsStr( 6045 AAMemoryLocation::MemoryLocationsKind MLK) { 6046 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 6047 return "all memory"; 6048 if (MLK == AAMemoryLocation::NO_LOCATIONS) 6049 return "no memory"; 6050 std::string S = "memory:"; 6051 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 6052 S += "stack,"; 6053 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 6054 S += "constant,"; 6055 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 6056 S += "internal global,"; 6057 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 6058 S += "external global,"; 6059 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 6060 S += "argument,"; 6061 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 6062 S += "inaccessible,"; 6063 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 6064 S += "malloced,"; 6065 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 6066 S += "unknown,"; 6067 S.pop_back(); 6068 return S; 6069 } 6070 6071 namespace { 6072 struct AAMemoryLocationImpl : public AAMemoryLocation { 6073 6074 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 6075 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 6076 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6077 AccessKind2Accesses[u] = nullptr; 6078 } 6079 6080 ~AAMemoryLocationImpl() { 6081 // The AccessSets are allocated via a BumpPtrAllocator, we call 6082 // the destructor manually. 6083 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6084 if (AccessKind2Accesses[u]) 6085 AccessKind2Accesses[u]->~AccessSet(); 6086 } 6087 6088 /// See AbstractAttribute::initialize(...). 6089 void initialize(Attributor &A) override { 6090 intersectAssumedBits(BEST_STATE); 6091 getKnownStateFromValue(A, getIRPosition(), getState()); 6092 IRAttribute::initialize(A); 6093 } 6094 6095 /// Return the memory behavior information encoded in the IR for \p IRP. 6096 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 6097 BitIntegerState &State, 6098 bool IgnoreSubsumingPositions = false) { 6099 // For internal functions we ignore `argmemonly` and 6100 // `inaccessiblememorargmemonly` as we might break it via interprocedural 6101 // constant propagation. It is unclear if this is the best way but it is 6102 // unlikely this will cause real performance problems. If we are deriving 6103 // attributes for the anchor function we even remove the attribute in 6104 // addition to ignoring it. 
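// An illustrative scenario: an internal function carrying `argmemonly` whose
// pointer argument is replaced by a known global during interprocedural
// constant propagation would then access memory that is no longer reachable
// through an argument, so keeping the attribute would be unsound.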
6105 bool UseArgMemOnly = true; 6106 Function *AnchorFn = IRP.getAnchorScope(); 6107 if (AnchorFn && A.isRunOn(*AnchorFn)) 6108 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 6109 6110 SmallVector<Attribute, 2> Attrs; 6111 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6112 for (const Attribute &Attr : Attrs) { 6113 switch (Attr.getKindAsEnum()) { 6114 case Attribute::ReadNone: 6115 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 6116 break; 6117 case Attribute::InaccessibleMemOnly: 6118 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 6119 break; 6120 case Attribute::ArgMemOnly: 6121 if (UseArgMemOnly) 6122 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 6123 else 6124 IRP.removeAttrs({Attribute::ArgMemOnly}); 6125 break; 6126 case Attribute::InaccessibleMemOrArgMemOnly: 6127 if (UseArgMemOnly) 6128 State.addKnownBits(inverseLocation( 6129 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 6130 else 6131 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 6132 break; 6133 default: 6134 llvm_unreachable("Unexpected attribute!"); 6135 } 6136 } 6137 } 6138 6139 /// See AbstractAttribute::getDeducedAttributes(...). 6140 void getDeducedAttributes(LLVMContext &Ctx, 6141 SmallVectorImpl<Attribute> &Attrs) const override { 6142 assert(Attrs.size() == 0); 6143 if (isAssumedReadNone()) { 6144 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6145 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 6146 if (isAssumedInaccessibleMemOnly()) 6147 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 6148 else if (isAssumedArgMemOnly()) 6149 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 6150 else if (isAssumedInaccessibleOrArgMemOnly()) 6151 Attrs.push_back( 6152 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 6153 } 6154 assert(Attrs.size() <= 1); 6155 } 6156 6157 /// See AbstractAttribute::manifest(...). 6158 ChangeStatus manifest(Attributor &A) override { 6159 const IRPosition &IRP = getIRPosition(); 6160 6161 // Check if we would improve the existing attributes first. 6162 SmallVector<Attribute, 4> DeducedAttrs; 6163 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6164 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6165 return IRP.hasAttr(Attr.getKindAsEnum(), 6166 /* IgnoreSubsumingPositions */ true); 6167 })) 6168 return ChangeStatus::UNCHANGED; 6169 6170 // Clear existing attributes. 6171 IRP.removeAttrs(AttrKinds); 6172 if (isAssumedReadNone()) 6173 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 6174 6175 // Use the generic manifest method. 6176 return IRAttribute::manifest(A); 6177 } 6178 6179 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
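/// Hedged usage sketch (mirrors the AccessPred lambdas used later in this
/// file; `MemLocAA` names a hypothetical AAMemoryLocation instance): visit all
/// recorded accesses to global memory and fail if any of them may write:
///   MemLocAA.checkForAllAccessesToMemoryKind(
///       [](const Instruction *, const Value *, AccessKind Kind,
///          MemoryLocationsKind) { return !(Kind & WRITE); },
///       inverseLocation(NO_GLOBAL_MEM, false, false));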
6180 bool checkForAllAccessesToMemoryKind( 6181 function_ref<bool(const Instruction *, const Value *, AccessKind, 6182 MemoryLocationsKind)> 6183 Pred, 6184 MemoryLocationsKind RequestedMLK) const override { 6185 if (!isValidState()) 6186 return false; 6187 6188 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 6189 if (AssumedMLK == NO_LOCATIONS) 6190 return true; 6191 6192 unsigned Idx = 0; 6193 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 6194 CurMLK *= 2, ++Idx) { 6195 if (CurMLK & RequestedMLK) 6196 continue; 6197 6198 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 6199 for (const AccessInfo &AI : *Accesses) 6200 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 6201 return false; 6202 } 6203 6204 return true; 6205 } 6206 6207 ChangeStatus indicatePessimisticFixpoint() override { 6208 // If we give up and indicate a pessimistic fixpoint this instruction will 6209 // become an access for all potential access kinds: 6210 // TODO: Add pointers for argmemonly and globals to improve the results of 6211 // checkForAllAccessesToMemoryKind. 6212 bool Changed = false; 6213 MemoryLocationsKind KnownMLK = getKnown(); 6214 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 6215 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 6216 if (!(CurMLK & KnownMLK)) 6217 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 6218 getAccessKindFromInst(I)); 6219 return AAMemoryLocation::indicatePessimisticFixpoint(); 6220 } 6221 6222 protected: 6223 /// Helper struct to tie together an instruction that has a read or write 6224 /// effect with the pointer it accesses (if any). 6225 struct AccessInfo { 6226 6227 /// The instruction that caused the access. 6228 const Instruction *I; 6229 6230 /// The base pointer that is accessed, or null if unknown. 6231 const Value *Ptr; 6232 6233 /// The kind of access (read/write/read+write). 6234 AccessKind Kind; 6235 6236 bool operator==(const AccessInfo &RHS) const { 6237 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 6238 } 6239 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 6240 if (LHS.I != RHS.I) 6241 return LHS.I < RHS.I; 6242 if (LHS.Ptr != RHS.Ptr) 6243 return LHS.Ptr < RHS.Ptr; 6244 if (LHS.Kind != RHS.Kind) 6245 return LHS.Kind < RHS.Kind; 6246 return false; 6247 } 6248 }; 6249 6250 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 6251 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 6252 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 6253 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 6254 6255 /// Return the kind(s) of location that may be accessed by \p V. 6256 AAMemoryLocation::MemoryLocationsKind 6257 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 6258 6259 /// Return the access kind as determined by \p I. 6260 AccessKind getAccessKindFromInst(const Instruction *I) { 6261 AccessKind AK = READ_WRITE; 6262 if (I) { 6263 AK = I->mayReadFromMemory() ? READ : NONE; 6264 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE)); 6265 } 6266 return AK; 6267 } 6268 6269 /// Update the state \p State and the AccessKind2Accesses given that \p I is 6270 /// an access of kind \p AK to a \p MLK memory location with the access 6271 /// pointer \p Ptr. 
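/// Worked example (hedged; the concrete bit values depend on the enum layout
/// in AAMemoryLocation): if NO_GLOBAL_INTERNAL_MEM were 1 << 2, an access to
/// an internal global would be recorded in
/// AccessKind2Accesses[Log2_32(1 << 2)] == AccessKind2Accesses[2], and
/// removeAssumedBits(MLK) would drop that single "not accessed" bit from
/// \p State.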
6272 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State, 6273 MemoryLocationsKind MLK, const Instruction *I, 6274 const Value *Ptr, bool &Changed, 6275 AccessKind AK = READ_WRITE) { 6276 6277 assert(isPowerOf2_32(MLK) && "Expected a single location set!"); 6278 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)]; 6279 if (!Accesses) 6280 Accesses = new (Allocator) AccessSet(); 6281 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second; 6282 State.removeAssumedBits(MLK); 6283 } 6284 6285 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or 6286 /// arguments, and update the state and access map accordingly. 6287 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr, 6288 AAMemoryLocation::StateType &State, bool &Changed); 6289 6290 /// Used to allocate access sets. 6291 BumpPtrAllocator &Allocator; 6292 6293 /// The set of IR attributes AAMemoryLocation deals with. 6294 static const Attribute::AttrKind AttrKinds[4]; 6295 }; 6296 6297 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = { 6298 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly, 6299 Attribute::InaccessibleMemOrArgMemOnly}; 6300 6301 void AAMemoryLocationImpl::categorizePtrValue( 6302 Attributor &A, const Instruction &I, const Value &Ptr, 6303 AAMemoryLocation::StateType &State, bool &Changed) { 6304 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for " 6305 << Ptr << " [" 6306 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n"); 6307 6308 auto StripGEPCB = [](Value *V) -> Value * { 6309 auto *GEP = dyn_cast<GEPOperator>(V); 6310 while (GEP) { 6311 V = GEP->getPointerOperand(); 6312 GEP = dyn_cast<GEPOperator>(V); 6313 } 6314 return V; 6315 }; 6316 6317 auto VisitValueCB = [&](Value &V, const Instruction *, 6318 AAMemoryLocation::StateType &T, 6319 bool Stripped) -> bool { 6320 MemoryLocationsKind MLK = NO_LOCATIONS; 6321 assert(!isa<GEPOperator>(V) && "GEPs should have been stripped."); 6322 if (isa<UndefValue>(V)) 6323 return true; 6324 if (auto *Arg = dyn_cast<Argument>(&V)) { 6325 if (Arg->hasByValAttr()) 6326 MLK = NO_LOCAL_MEM; 6327 else 6328 MLK = NO_ARGUMENT_MEM; 6329 } else if (auto *GV = dyn_cast<GlobalValue>(&V)) { 6330 if (GV->hasLocalLinkage()) 6331 MLK = NO_GLOBAL_INTERNAL_MEM; 6332 else 6333 MLK = NO_GLOBAL_EXTERNAL_MEM; 6334 } else if (isa<ConstantPointerNull>(V) && 6335 !NullPointerIsDefined(getAssociatedFunction(), 6336 V.getType()->getPointerAddressSpace())) { 6337 return true; 6338 } else if (isa<AllocaInst>(V)) { 6339 MLK = NO_LOCAL_MEM; 6340 } else if (const auto *CB = dyn_cast<CallBase>(&V)) { 6341 const auto &NoAliasAA = 6342 A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB)); 6343 if (NoAliasAA.isAssumedNoAlias()) 6344 MLK = NO_MALLOCED_MEM; 6345 else 6346 MLK = NO_UNKOWN_MEM; 6347 } else { 6348 MLK = NO_UNKOWN_MEM; 6349 } 6350 6351 assert(MLK != NO_LOCATIONS && "No location specified!"); 6352 updateStateAndAccessesMap(T, MLK, &I, &V, Changed, 6353 getAccessKindFromInst(&I)); 6354 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: " 6355 << V << " -> " << getMemoryLocationsAsStr(T.getAssumed()) 6356 << "\n"); 6357 return true; 6358 }; 6359 6360 if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>( 6361 A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(), 6362 /* UseValueSimplify */ true, 6363 /* MaxValues */ 32, StripGEPCB)) { 6364 LLVM_DEBUG( 6365 dbgs() << "[AAMemoryLocation] Pointer locations not 
categorized\n"); 6366 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed, 6367 getAccessKindFromInst(&I)); 6368 } else { 6369 LLVM_DEBUG( 6370 dbgs() 6371 << "[AAMemoryLocation] Accessed locations with pointer locations: " 6372 << getMemoryLocationsAsStr(State.getAssumed()) << "\n"); 6373 } 6374 } 6375 6376 AAMemoryLocation::MemoryLocationsKind 6377 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I, 6378 bool &Changed) { 6379 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for " 6380 << I << "\n"); 6381 6382 AAMemoryLocation::StateType AccessedLocs; 6383 AccessedLocs.intersectAssumedBits(NO_LOCATIONS); 6384 6385 if (auto *CB = dyn_cast<CallBase>(&I)) { 6386 6387 // First check if we assume any memory is access is visible. 6388 const auto &CBMemLocationAA = 6389 A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB)); 6390 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I 6391 << " [" << CBMemLocationAA << "]\n"); 6392 6393 if (CBMemLocationAA.isAssumedReadNone()) 6394 return NO_LOCATIONS; 6395 6396 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) { 6397 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr, 6398 Changed, getAccessKindFromInst(&I)); 6399 return AccessedLocs.getAssumed(); 6400 } 6401 6402 uint32_t CBAssumedNotAccessedLocs = 6403 CBMemLocationAA.getAssumedNotAccessedLocation(); 6404 6405 // Set the argmemonly and global bit as we handle them separately below. 6406 uint32_t CBAssumedNotAccessedLocsNoArgMem = 6407 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 6408 6409 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 6410 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 6411 continue; 6412 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 6413 getAccessKindFromInst(&I)); 6414 } 6415 6416 // Now handle global memory if it might be accessed. This is slightly tricky 6417 // as NO_GLOBAL_MEM has multiple bits set. 6418 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 6419 if (HasGlobalAccesses) { 6420 auto AccessPred = [&](const Instruction *, const Value *Ptr, 6421 AccessKind Kind, MemoryLocationsKind MLK) { 6422 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 6423 getAccessKindFromInst(&I)); 6424 return true; 6425 }; 6426 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 6427 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 6428 return AccessedLocs.getWorstState(); 6429 } 6430 6431 LLVM_DEBUG( 6432 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 6433 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6434 6435 // Now handle argument memory if it might be accessed. 6436 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 6437 if (HasArgAccesses) { 6438 for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E; 6439 ++ArgNo) { 6440 6441 // Skip non-pointer arguments. 6442 const Value *ArgOp = CB->getArgOperand(ArgNo); 6443 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 6444 continue; 6445 6446 // Skip readnone arguments. 
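// Illustrative IR (hedged): a pointer handed to a readnone parameter, e.g.,
//   declare void @use(i8* nocapture readnone)
//   call void @use(i8* %p)
// is never dereferenced by the callee, so it adds no argument-memory access
// and is skipped here.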
6447 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo); 6448 const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>( 6449 *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6450 6451 if (ArgOpMemLocationAA.isAssumedReadNone()) 6452 continue; 6453 6454 // Categorize potentially accessed pointer arguments as if there was an 6455 // access instruction with them as pointer. 6456 categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed); 6457 } 6458 } 6459 6460 LLVM_DEBUG( 6461 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 6462 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6463 6464 return AccessedLocs.getAssumed(); 6465 } 6466 6467 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 6468 LLVM_DEBUG( 6469 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 6470 << I << " [" << *Ptr << "]\n"); 6471 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 6472 return AccessedLocs.getAssumed(); 6473 } 6474 6475 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 6476 << I << "\n"); 6477 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 6478 getAccessKindFromInst(&I)); 6479 return AccessedLocs.getAssumed(); 6480 } 6481 6482 /// An AA to represent the memory behavior function attributes. 6483 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 6484 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 6485 : AAMemoryLocationImpl(IRP, A) {} 6486 6487 /// See AbstractAttribute::updateImpl(Attributor &A). 6488 virtual ChangeStatus updateImpl(Attributor &A) override { 6489 6490 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6491 *this, getIRPosition(), /* TrackDependence */ false); 6492 if (MemBehaviorAA.isAssumedReadNone()) { 6493 if (MemBehaviorAA.isKnownReadNone()) 6494 return indicateOptimisticFixpoint(); 6495 assert(isAssumedReadNone() && 6496 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 6497 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 6498 return ChangeStatus::UNCHANGED; 6499 } 6500 6501 // The current assumed state used to determine a change. 6502 auto AssumedState = getAssumed(); 6503 bool Changed = false; 6504 6505 auto CheckRWInst = [&](Instruction &I) { 6506 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed); 6507 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I 6508 << ": " << getMemoryLocationsAsStr(MLK) << "\n"); 6509 removeAssumedBits(inverseLocation(MLK, false, false)); 6510 return true; 6511 }; 6512 6513 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 6514 return indicatePessimisticFixpoint(); 6515 6516 Changed |= AssumedState != getAssumed(); 6517 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6518 } 6519 6520 /// See AbstractAttribute::trackStatistics() 6521 void trackStatistics() const override { 6522 if (isAssumedReadNone()) 6523 STATS_DECLTRACK_FN_ATTR(readnone) 6524 else if (isAssumedArgMemOnly()) 6525 STATS_DECLTRACK_FN_ATTR(argmemonly) 6526 else if (isAssumedInaccessibleMemOnly()) 6527 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly) 6528 else if (isAssumedInaccessibleOrArgMemOnly()) 6529 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly) 6530 } 6531 }; 6532 6533 /// AAMemoryLocation attribute for call sites. 
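/// In short (hedged summary of the updateImpl below): the call-site variant
/// mirrors the callee's AAMemoryLocation by replaying all of the callee's
/// recorded accesses into its own state via updateStateAndAccessesMap, and
/// gives up if that query fails.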
6534 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6535 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6536 : AAMemoryLocationImpl(IRP, A) {}
6537
6538 /// See AbstractAttribute::initialize(...).
6539 void initialize(Attributor &A) override {
6540 AAMemoryLocationImpl::initialize(A);
6541 Function *F = getAssociatedFunction();
6542 if (!F || !A.isFunctionIPOAmendable(*F)) {
6543 indicatePessimisticFixpoint();
6544 return;
6545 }
6546 }
6547
6548 /// See AbstractAttribute::updateImpl(...).
6549 ChangeStatus updateImpl(Attributor &A) override {
6550 // TODO: Once we have call site specific value information we can provide
6551 // call site specific liveness information and then it makes
6552 // sense to specialize attributes for call site arguments instead of
6553 // redirecting requests to the callee argument.
6554 Function *F = getAssociatedFunction();
6555 const IRPosition &FnPos = IRPosition::function(*F);
6556 auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6557 bool Changed = false;
6558 auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6559 AccessKind Kind, MemoryLocationsKind MLK) {
6560 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6561 getAccessKindFromInst(I));
6562 return true;
6563 };
6564 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6565 return indicatePessimisticFixpoint();
6566 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6567 }
6568
6569 /// See AbstractAttribute::trackStatistics()
6570 void trackStatistics() const override {
6571 if (isAssumedReadNone())
6572 STATS_DECLTRACK_CS_ATTR(readnone)
6573 }
6574 };
6575
6576 /// ------------------ Value Constant Range Attribute -------------------------
6577
6578 struct AAValueConstantRangeImpl : AAValueConstantRange {
6579 using StateType = IntegerRangeState;
6580 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6581 : AAValueConstantRange(IRP, A) {}
6582
6583 /// See AbstractAttribute::getAsStr().
6584 const std::string getAsStr() const override {
6585 std::string Str;
6586 llvm::raw_string_ostream OS(Str);
6587 OS << "range(" << getBitWidth() << ")<";
6588 getKnown().print(OS);
6589 OS << " / ";
6590 getAssumed().print(OS);
6591 OS << ">";
6592 return OS.str();
6593 }
6594
6595 /// Helper function to get a SCEV expr for the associated value at program
6596 /// point \p I.
6597 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6598 if (!getAnchorScope())
6599 return nullptr;
6600
6601 ScalarEvolution *SE =
6602 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6603 *getAnchorScope());
6604
6605 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6606 *getAnchorScope());
6607
6608 if (!SE || !LI)
6609 return nullptr;
6610
6611 const SCEV *S = SE->getSCEV(&getAssociatedValue());
6612 if (!I)
6613 return S;
6614
6615 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6616 }
6617
6618 /// Helper function to get a range from SCEV for the associated value at
6619 /// program point \p I.
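/// Hedged example of what SCEV can contribute: for a canonical induction
/// variable of a loop with a known trip count of 8, SE->getUnsignedRange on
/// the corresponding SCEV typically yields something like [0, 8); the exact
/// result depends entirely on ScalarEvolution's view of the enclosing
/// function.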
6620 ConstantRange getConstantRangeFromSCEV(Attributor &A, 6621 const Instruction *I = nullptr) const { 6622 if (!getAnchorScope()) 6623 return getWorstState(getBitWidth()); 6624 6625 ScalarEvolution *SE = 6626 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6627 *getAnchorScope()); 6628 6629 const SCEV *S = getSCEV(A, I); 6630 if (!SE || !S) 6631 return getWorstState(getBitWidth()); 6632 6633 return SE->getUnsignedRange(S); 6634 } 6635 6636 /// Helper function to get a range from LVI for the associated value at 6637 /// program point \p I. 6638 ConstantRange 6639 getConstantRangeFromLVI(Attributor &A, 6640 const Instruction *CtxI = nullptr) const { 6641 if (!getAnchorScope()) 6642 return getWorstState(getBitWidth()); 6643 6644 LazyValueInfo *LVI = 6645 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 6646 *getAnchorScope()); 6647 6648 if (!LVI || !CtxI) 6649 return getWorstState(getBitWidth()); 6650 return LVI->getConstantRange(&getAssociatedValue(), 6651 const_cast<BasicBlock *>(CtxI->getParent()), 6652 const_cast<Instruction *>(CtxI)); 6653 } 6654 6655 /// See AAValueConstantRange::getKnownConstantRange(..). 6656 ConstantRange 6657 getKnownConstantRange(Attributor &A, 6658 const Instruction *CtxI = nullptr) const override { 6659 if (!CtxI || CtxI == getCtxI()) 6660 return getKnown(); 6661 6662 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6663 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6664 return getKnown().intersectWith(SCEVR).intersectWith(LVIR); 6665 } 6666 6667 /// See AAValueConstantRange::getAssumedConstantRange(..). 6668 ConstantRange 6669 getAssumedConstantRange(Attributor &A, 6670 const Instruction *CtxI = nullptr) const override { 6671 // TODO: Make SCEV use Attributor assumption. 6672 // We may be able to bound a variable range via assumptions in 6673 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to 6674 // evolve to x^2 + x, then we can say that y is in [2, 12]. 6675 6676 if (!CtxI || CtxI == getCtxI()) 6677 return getAssumed(); 6678 6679 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6680 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6681 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR); 6682 } 6683 6684 /// See AbstractAttribute::initialize(..). 6685 void initialize(Attributor &A) override { 6686 // Intersect a range given by SCEV. 6687 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 6688 6689 // Intersect a range given by LVI. 6690 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 6691 } 6692 6693 /// Helper function to create MDNode for range metadata. 6694 static MDNode * 6695 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx, 6696 const ConstantRange &AssumedConstantRange) { 6697 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get( 6698 Ty, AssumedConstantRange.getLower())), 6699 ConstantAsMetadata::get(ConstantInt::get( 6700 Ty, AssumedConstantRange.getUpper()))}; 6701 return MDNode::get(Ctx, LowAndHigh); 6702 } 6703 6704 /// Return true if \p Assumed is included in \p KnownRanges. 6705 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) { 6706 6707 if (Assumed.isFullSet()) 6708 return false; 6709 6710 if (!KnownRanges) 6711 return true; 6712 6713 // If multiple ranges are annotated in IR, we give up to annotate assumed 6714 // range for now. 6715 6716 // TODO: If there exists a known range which containts assumed range, we 6717 // can say assumed range is better. 
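// Worked example (hedged): existing metadata !range !{i32 0, i32 100} encodes
// the known range [0, 100); an assumed range of [10, 20) is strictly contained
// in it and therefore "better", so the caller would rewrite the metadata. If
// the metadata already lists more than one [Lower, Upper) pair we
// conservatively keep it, as checked directly below.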
6718 if (KnownRanges->getNumOperands() > 2) 6719 return false; 6720 6721 ConstantInt *Lower = 6722 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0)); 6723 ConstantInt *Upper = 6724 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1)); 6725 6726 ConstantRange Known(Lower->getValue(), Upper->getValue()); 6727 return Known.contains(Assumed) && Known != Assumed; 6728 } 6729 6730 /// Helper function to set range metadata. 6731 static bool 6732 setRangeMetadataIfisBetterRange(Instruction *I, 6733 const ConstantRange &AssumedConstantRange) { 6734 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 6735 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 6736 if (!AssumedConstantRange.isEmptySet()) { 6737 I->setMetadata(LLVMContext::MD_range, 6738 getMDNodeForConstantRange(I->getType(), I->getContext(), 6739 AssumedConstantRange)); 6740 return true; 6741 } 6742 } 6743 return false; 6744 } 6745 6746 /// See AbstractAttribute::manifest() 6747 ChangeStatus manifest(Attributor &A) override { 6748 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6749 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 6750 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 6751 6752 auto &V = getAssociatedValue(); 6753 if (!AssumedConstantRange.isEmptySet() && 6754 !AssumedConstantRange.isSingleElement()) { 6755 if (Instruction *I = dyn_cast<Instruction>(&V)) 6756 if (isa<CallInst>(I) || isa<LoadInst>(I)) 6757 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 6758 Changed = ChangeStatus::CHANGED; 6759 } 6760 6761 return Changed; 6762 } 6763 }; 6764 6765 struct AAValueConstantRangeArgument final 6766 : AAArgumentFromCallSiteArguments< 6767 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> { 6768 using Base = AAArgumentFromCallSiteArguments< 6769 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>; 6770 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 6771 : Base(IRP, A) {} 6772 6773 /// See AbstractAttribute::initialize(..). 6774 void initialize(Attributor &A) override { 6775 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 6776 indicatePessimisticFixpoint(); 6777 } else { 6778 Base::initialize(A); 6779 } 6780 } 6781 6782 /// See AbstractAttribute::trackStatistics() 6783 void trackStatistics() const override { 6784 STATS_DECLTRACK_ARG_ATTR(value_range) 6785 } 6786 }; 6787 6788 struct AAValueConstantRangeReturned 6789 : AAReturnedFromReturnedValues<AAValueConstantRange, 6790 AAValueConstantRangeImpl> { 6791 using Base = AAReturnedFromReturnedValues<AAValueConstantRange, 6792 AAValueConstantRangeImpl>; 6793 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 6794 : Base(IRP, A) {} 6795 6796 /// See AbstractAttribute::initialize(...). 6797 void initialize(Attributor &A) override {} 6798 6799 /// See AbstractAttribute::trackStatistics() 6800 void trackStatistics() const override { 6801 STATS_DECLTRACK_FNRET_ATTR(value_range) 6802 } 6803 }; 6804 6805 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 6806 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 6807 : AAValueConstantRangeImpl(IRP, A) {} 6808 6809 /// See AbstractAttribute::initialize(...). 
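/// Hedged illustration of the fast paths below: a literal `i32 7` collapses to
/// the singleton range [7, 8) and reaches an optimistic fixpoint right away;
/// `undef` is collapsed to the range containing only 0; and a load annotated
/// with `!range !{i32 1, i32 5}` seeds the known range with [1, 5).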
6810 void initialize(Attributor &A) override { 6811 AAValueConstantRangeImpl::initialize(A); 6812 Value &V = getAssociatedValue(); 6813 6814 if (auto *C = dyn_cast<ConstantInt>(&V)) { 6815 unionAssumed(ConstantRange(C->getValue())); 6816 indicateOptimisticFixpoint(); 6817 return; 6818 } 6819 6820 if (isa<UndefValue>(&V)) { 6821 // Collapse the undef state to 0. 6822 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 6823 indicateOptimisticFixpoint(); 6824 return; 6825 } 6826 6827 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 6828 return; 6829 // If it is a load instruction with range metadata, use it. 6830 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 6831 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 6832 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 6833 return; 6834 } 6835 6836 // We can work with PHI and select instruction as we traverse their operands 6837 // during update. 6838 if (isa<SelectInst>(V) || isa<PHINode>(V)) 6839 return; 6840 6841 // Otherwise we give up. 6842 indicatePessimisticFixpoint(); 6843 6844 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 6845 << getAssociatedValue() << "\n"); 6846 } 6847 6848 bool calculateBinaryOperator( 6849 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 6850 const Instruction *CtxI, 6851 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6852 Value *LHS = BinOp->getOperand(0); 6853 Value *RHS = BinOp->getOperand(1); 6854 // TODO: Allow non integers as well. 6855 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 6856 return false; 6857 6858 auto &LHSAA = 6859 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 6860 QuerriedAAs.push_back(&LHSAA); 6861 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 6862 6863 auto &RHSAA = 6864 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 6865 QuerriedAAs.push_back(&RHSAA); 6866 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 6867 6868 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 6869 6870 T.unionAssumed(AssumedRange); 6871 6872 // TODO: Track a known state too. 6873 6874 return T.isValidState(); 6875 } 6876 6877 bool calculateCastInst( 6878 Attributor &A, CastInst *CastI, IntegerRangeState &T, 6879 const Instruction *CtxI, 6880 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6881 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 6882 // TODO: Allow non integers as well. 6883 Value &OpV = *CastI->getOperand(0); 6884 if (!OpV.getType()->isIntegerTy()) 6885 return false; 6886 6887 auto &OpAA = 6888 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV)); 6889 QuerriedAAs.push_back(&OpAA); 6890 T.unionAssumed( 6891 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 6892 return T.isValidState(); 6893 } 6894 6895 bool 6896 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 6897 const Instruction *CtxI, 6898 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6899 Value *LHS = CmpI->getOperand(0); 6900 Value *RHS = CmpI->getOperand(1); 6901 // TODO: Allow non integers as well. 
6902 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 6903 return false; 6904 6905 auto &LHSAA = 6906 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 6907 QuerriedAAs.push_back(&LHSAA); 6908 auto &RHSAA = 6909 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 6910 QuerriedAAs.push_back(&RHSAA); 6911 6912 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 6913 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 6914 6915 // If one of them is empty set, we can't decide. 6916 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 6917 return true; 6918 6919 bool MustTrue = false, MustFalse = false; 6920 6921 auto AllowedRegion = 6922 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 6923 6924 auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion( 6925 CmpI->getPredicate(), RHSAARange); 6926 6927 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 6928 MustFalse = true; 6929 6930 if (SatisfyingRegion.contains(LHSAARange)) 6931 MustTrue = true; 6932 6933 assert((!MustTrue || !MustFalse) && 6934 "Either MustTrue or MustFalse should be false!"); 6935 6936 if (MustTrue) 6937 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 6938 else if (MustFalse) 6939 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 6940 else 6941 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 6942 6943 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 6944 << " " << RHSAA << "\n"); 6945 6946 // TODO: Track a known state too. 6947 return T.isValidState(); 6948 } 6949 6950 /// See AbstractAttribute::updateImpl(...). 6951 ChangeStatus updateImpl(Attributor &A) override { 6952 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 6953 IntegerRangeState &T, bool Stripped) -> bool { 6954 Instruction *I = dyn_cast<Instruction>(&V); 6955 if (!I || isa<CallBase>(I)) { 6956 6957 // If the value is not instruction, we query AA to Attributor. 6958 const auto &AA = 6959 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V)); 6960 6961 // Clamp operator is not used to utilize a program point CtxI. 6962 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI)); 6963 6964 return T.isValidState(); 6965 } 6966 6967 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs; 6968 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) { 6969 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs)) 6970 return false; 6971 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) { 6972 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs)) 6973 return false; 6974 } else if (auto *CastI = dyn_cast<CastInst>(I)) { 6975 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs)) 6976 return false; 6977 } else { 6978 // Give up with other instructions. 6979 // TODO: Add other instructions 6980 6981 T.indicatePessimisticFixpoint(); 6982 return false; 6983 } 6984 6985 // Catch circular reasoning in a pessimistic way for now. 6986 // TODO: Check how the range evolves and if we stripped anything, see also 6987 // AADereferenceable or AAAlign for similar situations. 6988 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) { 6989 if (QueriedAA != this) 6990 continue; 6991 // If we are in a stady state we do not need to worry. 
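// Hedged example of the circular case: for `%x = phi i32 [ 0, %entry ],
// [ %x.next, %loop ]` the operand traversal can end up querying this very AA
// for %x again; if the newly accumulated range equals our current assumed
// range we have (at least temporarily) stabilized, otherwise we conservatively
// give up below.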
6992 if (T.getAssumed() == getState().getAssumed()) 6993 continue; 6994 T.indicatePessimisticFixpoint(); 6995 } 6996 6997 return T.isValidState(); 6998 }; 6999 7000 IntegerRangeState T(getBitWidth()); 7001 7002 if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>( 7003 A, getIRPosition(), *this, T, VisitValueCB, getCtxI(), 7004 /* UseValueSimplify */ false)) 7005 return indicatePessimisticFixpoint(); 7006 7007 return clampStateAndIndicateChange(getState(), T); 7008 } 7009 7010 /// See AbstractAttribute::trackStatistics() 7011 void trackStatistics() const override { 7012 STATS_DECLTRACK_FLOATING_ATTR(value_range) 7013 } 7014 }; 7015 7016 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 7017 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 7018 : AAValueConstantRangeImpl(IRP, A) {} 7019 7020 /// See AbstractAttribute::initialize(...). 7021 ChangeStatus updateImpl(Attributor &A) override { 7022 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 7023 "not be called"); 7024 } 7025 7026 /// See AbstractAttribute::trackStatistics() 7027 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 7028 }; 7029 7030 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 7031 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 7032 : AAValueConstantRangeFunction(IRP, A) {} 7033 7034 /// See AbstractAttribute::trackStatistics() 7035 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 7036 }; 7037 7038 struct AAValueConstantRangeCallSiteReturned 7039 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7040 AAValueConstantRangeImpl> { 7041 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 7042 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7043 AAValueConstantRangeImpl>(IRP, A) {} 7044 7045 /// See AbstractAttribute::initialize(...). 7046 void initialize(Attributor &A) override { 7047 // If it is a load instruction with range metadata, use the metadata. 
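// Here the associated value is the call site's return value; hedged
// illustration: `%r = call i32 @f(), !range !{i32 0, i32 10}` lets us seed the
// known range with [0, 10) before deferring to the returned-value logic.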
7048 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 7049 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 7050 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 7051 7052 AAValueConstantRangeImpl::initialize(A); 7053 } 7054 7055 /// See AbstractAttribute::trackStatistics() 7056 void trackStatistics() const override { 7057 STATS_DECLTRACK_CSRET_ATTR(value_range) 7058 } 7059 }; 7060 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 7061 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 7062 : AAValueConstantRangeFloating(IRP, A) {} 7063 7064 /// See AbstractAttribute::trackStatistics() 7065 void trackStatistics() const override { 7066 STATS_DECLTRACK_CSARG_ATTR(value_range) 7067 } 7068 }; 7069 } // namespace 7070 7071 const char AAReturnedValues::ID = 0; 7072 const char AANoUnwind::ID = 0; 7073 const char AANoSync::ID = 0; 7074 const char AANoFree::ID = 0; 7075 const char AANonNull::ID = 0; 7076 const char AANoRecurse::ID = 0; 7077 const char AAWillReturn::ID = 0; 7078 const char AAUndefinedBehavior::ID = 0; 7079 const char AANoAlias::ID = 0; 7080 const char AAReachability::ID = 0; 7081 const char AANoReturn::ID = 0; 7082 const char AAIsDead::ID = 0; 7083 const char AADereferenceable::ID = 0; 7084 const char AAAlign::ID = 0; 7085 const char AANoCapture::ID = 0; 7086 const char AAValueSimplify::ID = 0; 7087 const char AAHeapToStack::ID = 0; 7088 const char AAPrivatizablePtr::ID = 0; 7089 const char AAMemoryBehavior::ID = 0; 7090 const char AAMemoryLocation::ID = 0; 7091 const char AAValueConstantRange::ID = 0; 7092 7093 // Macro magic to create the static generator function for attributes that 7094 // follow the naming scheme. 7095 7096 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ 7097 case IRPosition::PK: \ 7098 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); 7099 7100 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ 7101 case IRPosition::PK: \ 7102 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \ 7103 ++NumAAs; \ 7104 break; 7105 7106 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7107 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7108 CLASS *AA = nullptr; \ 7109 switch (IRP.getPositionKind()) { \ 7110 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7111 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7112 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7113 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7114 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7115 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7116 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7117 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7118 } \ 7119 return *AA; \ 7120 } 7121 7122 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7123 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7124 CLASS *AA = nullptr; \ 7125 switch (IRP.getPositionKind()) { \ 7126 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7127 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ 7128 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7129 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7130 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7131 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7132 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7133 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7134 } \ 7135 return *AA; \ 7136 
} 7137 7138 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7139 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7140 CLASS *AA = nullptr; \ 7141 switch (IRP.getPositionKind()) { \ 7142 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7143 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7144 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7145 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7146 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7147 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7148 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7149 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7150 } \ 7151 return *AA; \ 7152 } 7153 7154 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7155 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7156 CLASS *AA = nullptr; \ 7157 switch (IRP.getPositionKind()) { \ 7158 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7159 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7160 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7161 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7162 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7163 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7164 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7165 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7166 } \ 7167 return *AA; \ 7168 } 7169 7170 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7171 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7172 CLASS *AA = nullptr; \ 7173 switch (IRP.getPositionKind()) { \ 7174 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7175 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7176 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7177 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7178 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7179 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7180 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7181 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7182 } \ 7183 return *AA; \ 7184 } 7185 7186 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) 7187 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) 7188 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) 7189 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) 7190 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) 7191 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues) 7192 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation) 7193 7194 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) 7195 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) 7196 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr) 7197 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) 7198 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) 7199 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture) 7200 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange) 7201 7202 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) 7203 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) 7204 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) 7205 7206 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack) 7207 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability) 7208 
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior) 7209 7210 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior) 7211 7212 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION 7213 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION 7214 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION 7215 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION 7216 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION 7217 #undef SWITCH_PK_CREATE 7218 #undef SWITCH_PK_INV 7219
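// A hedged sketch of what the macro magic above expands to, using AANoUnwind
// and collapsing the unreachable cases into `default` for brevity:
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     default:
//       llvm_unreachable("Cannot create AANoUnwind for this position!");
//     }
//     return *AA;
//   }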