//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
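// For illustration, STATS_DECLTRACK_ARG_ATTR(returned) expands (roughly) to:
//   { STATISTIC(NumIRArguments_returned, "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned); }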
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {
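/// Note on the helper below: it returns None if Attributor::getAssumedConstant
/// does not provide a constant for \p V, and nullptr if the provided constant
/// is not a ConstantInt.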
static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
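// For illustration (hypothetical types, not taken from the surrounding code):
// given
//   %S = type { i32, i32, i32 }
// a pointer %p of type %S*, and Offset = 8, the loop above collects the
// indices {0, 2} and emits roughly "getelementptr %S, %S* %p, i32 0, i32 2";
// no byte-wise adjustment is needed because the remaining offset is 0.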
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }
    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         /* TrackDependence */ UseAssumed);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

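// Note: the two helpers above differ in how offsets are accumulated.
// getMinimalBaseOfAccsesPointerOperand also handles non-constant GEP indices
// by querying AAValueConstantRange and using the (known or assumed) signed
// minimum of the range, whereas getBasePointerOfAccessPointerOperand only
// strips constant offsets.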
/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as-in update is
/// required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all we find.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

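// For illustration only (AAFoo/AAFooImpl are placeholders, not attributes
// defined in this file): concrete attributes typically combine an *Impl class
// with this helper (or the analogous AAArgumentFromCallSiteArguments /
// AACallSiteReturnedFromReturned defined below), roughly like
//   struct AAFooReturned final
//       : AAReturnedFromReturnedValues<AAFoo, AAFooImpl> {
//     AAFooReturned(const IRPosition &IRP, Attributor &A)
//         : AAReturnedFromReturnedValues<AAFoo, AAFooImpl>(IRP, A) {}
//   };
// so that the deduction for the returned position falls out of the generic
// clamping logic above.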
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all we find.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
///   U     - Underlying use.
///   I     - The user of the \p U.
///   State - The state to be updated.
///   Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a)
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent is a conjunction of the children's known
    // states, so it is initialized with the best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return values");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

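// The string produced below has the form "returns(#<N>)[#UC: <M>]", or
// "may-return(#<N>)[#UC: <M>]" before a fixpoint is reached, e.g.,
// "may-return(#2)[#UC: 0]" for two potential return values and no unresolved
// calls.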
const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV,
                               SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
                      << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync; currently
  /// only the mem* intrinsics (memcpy, memmove, memset) are handled.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both are relaxed can it be treated as relaxed.
    // Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

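// In Attributor terminology a "floating" value is a generic value position
// (IRPosition::IRP_FLOAT), i.e., an SSA value that is not tied to an argument,
// call site, or return position.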
/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

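// Note: the call-site-returned variant below reuses the floating-value
// deduction but overrides manifest() so that no attribute is attached to the
// call site return.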
1572 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 1573 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 1574 : AANoFreeFloating(IRP, A) {} 1575 1576 ChangeStatus manifest(Attributor &A) override { 1577 return ChangeStatus::UNCHANGED; 1578 } 1579 /// See AbstractAttribute::trackStatistics() 1580 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 1581 }; 1582 1583 /// ------------------------ NonNull Argument Attribute ------------------------ 1584 static int64_t getKnownNonNullAndDerefBytesForUse( 1585 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 1586 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 1587 TrackUse = false; 1588 1589 const Value *UseV = U->get(); 1590 if (!UseV->getType()->isPointerTy()) 1591 return 0; 1592 1593 Type *PtrTy = UseV->getType(); 1594 const Function *F = I->getFunction(); 1595 bool NullPointerIsDefined = 1596 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 1597 const DataLayout &DL = A.getInfoCache().getDL(); 1598 if (const auto *CB = dyn_cast<CallBase>(I)) { 1599 if (CB->isBundleOperand(U)) { 1600 if (RetainedKnowledge RK = getKnowledgeFromUse( 1601 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 1602 IsNonNull |= 1603 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 1604 return RK.ArgValue; 1605 } 1606 return 0; 1607 } 1608 1609 if (CB->isCallee(U)) { 1610 IsNonNull |= !NullPointerIsDefined; 1611 return 0; 1612 } 1613 1614 unsigned ArgNo = CB->getArgOperandNo(U); 1615 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 1616 // As long as we only use known information there is no need to track 1617 // dependences here. 1618 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, 1619 /* TrackDependence */ false); 1620 IsNonNull |= DerefAA.isKnownNonNull(); 1621 return DerefAA.getKnownDereferenceableBytes(); 1622 } 1623 1624 // We need to follow common pointer manipulation uses to the accesses they 1625 // feed into. We can try to be smart to avoid looking through things we do not 1626 // like for now, e.g., non-inbounds GEPs. 1627 if (isa<CastInst>(I)) { 1628 TrackUse = true; 1629 return 0; 1630 } 1631 1632 if (isa<GetElementPtrInst>(I)) { 1633 TrackUse = true; 1634 return 0; 1635 } 1636 1637 int64_t Offset; 1638 const Value *Base = 1639 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); 1640 if (Base) { 1641 if (Base == &AssociatedValue && 1642 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1643 int64_t DerefBytes = 1644 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; 1645 1646 IsNonNull |= !NullPointerIsDefined; 1647 return std::max(int64_t(0), DerefBytes); 1648 } 1649 } 1650 1651 /// Corner case when an offset is 0. 
1652 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, 1653 /*AllowNonInbounds*/ true); 1654 if (Base) { 1655 if (Offset == 0 && Base == &AssociatedValue && 1656 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1657 int64_t DerefBytes = 1658 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); 1659 IsNonNull |= !NullPointerIsDefined; 1660 return std::max(int64_t(0), DerefBytes); 1661 } 1662 } 1663 1664 return 0; 1665 } 1666 1667 struct AANonNullImpl : AANonNull { 1668 AANonNullImpl(const IRPosition &IRP, Attributor &A) 1669 : AANonNull(IRP, A), 1670 NullIsDefined(NullPointerIsDefined( 1671 getAnchorScope(), 1672 getAssociatedValue().getType()->getPointerAddressSpace())) {} 1673 1674 /// See AbstractAttribute::initialize(...). 1675 void initialize(Attributor &A) override { 1676 Value &V = getAssociatedValue(); 1677 if (!NullIsDefined && 1678 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 1679 /* IgnoreSubsumingPositions */ false, &A)) 1680 indicateOptimisticFixpoint(); 1681 else if (isa<ConstantPointerNull>(V)) 1682 indicatePessimisticFixpoint(); 1683 else 1684 AANonNull::initialize(A); 1685 1686 bool CanBeNull = true; 1687 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) 1688 if (!CanBeNull) 1689 indicateOptimisticFixpoint(); 1690 1691 if (!getState().isAtFixpoint()) 1692 if (Instruction *CtxI = getCtxI()) 1693 followUsesInMBEC(*this, A, getState(), *CtxI); 1694 } 1695 1696 /// See followUsesInMBEC 1697 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 1698 AANonNull::StateType &State) { 1699 bool IsNonNull = false; 1700 bool TrackUse = false; 1701 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 1702 IsNonNull, TrackUse); 1703 State.setKnown(IsNonNull); 1704 return TrackUse; 1705 } 1706 1707 /// See AbstractAttribute::getAsStr(). 1708 const std::string getAsStr() const override { 1709 return getAssumed() ? "nonnull" : "may-null"; 1710 } 1711 1712 /// Flag to determine if the underlying value can be null and still allow 1713 /// valid accesses. 1714 const bool NullIsDefined; 1715 }; 1716 1717 /// NonNull attribute for a floating value. 1718 struct AANonNullFloating : public AANonNullImpl { 1719 AANonNullFloating(const IRPosition &IRP, Attributor &A) 1720 : AANonNullImpl(IRP, A) {} 1721 1722 /// See AbstractAttribute::updateImpl(...). 1723 ChangeStatus updateImpl(Attributor &A) override { 1724 if (!NullIsDefined) { 1725 const auto &DerefAA = 1726 A.getAAFor<AADereferenceable>(*this, getIRPosition()); 1727 if (DerefAA.getAssumedDereferenceableBytes()) 1728 return ChangeStatus::UNCHANGED; 1729 } 1730 1731 const DataLayout &DL = A.getDataLayout(); 1732 1733 DominatorTree *DT = nullptr; 1734 AssumptionCache *AC = nullptr; 1735 InformationCache &InfoCache = A.getInfoCache(); 1736 if (const Function *Fn = getAnchorScope()) { 1737 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 1738 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 1739 } 1740 1741 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 1742 AANonNull::StateType &T, bool Stripped) -> bool { 1743 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); 1744 if (!Stripped && this == &AA) { 1745 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 1746 T.indicatePessimisticFixpoint(); 1747 } else { 1748 // Use abstract attribute information. 
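        // The "^=" below clamps the local state: only facts assumed by both
        // this position and the visited value survive, so a single
        // possibly-null incoming value is enough to drop the nonnull
        // assumption (see the state's operator^=).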
1749 const AANonNull::StateType &NS = 1750 static_cast<const AANonNull::StateType &>(AA.getState()); 1751 T ^= NS; 1752 } 1753 return T.isValidState(); 1754 }; 1755 1756 StateType T; 1757 if (!genericValueTraversal<AANonNull, StateType>( 1758 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 1759 return indicatePessimisticFixpoint(); 1760 1761 return clampStateAndIndicateChange(getState(), T); 1762 } 1763 1764 /// See AbstractAttribute::trackStatistics() 1765 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1766 }; 1767 1768 /// NonNull attribute for function return value. 1769 struct AANonNullReturned final 1770 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> { 1771 AANonNullReturned(const IRPosition &IRP, Attributor &A) 1772 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {} 1773 1774 /// See AbstractAttribute::trackStatistics() 1775 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1776 }; 1777 1778 /// NonNull attribute for function argument. 1779 struct AANonNullArgument final 1780 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 1781 AANonNullArgument(const IRPosition &IRP, Attributor &A) 1782 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 1783 1784 /// See AbstractAttribute::trackStatistics() 1785 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 1786 }; 1787 1788 struct AANonNullCallSiteArgument final : AANonNullFloating { 1789 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 1790 : AANonNullFloating(IRP, A) {} 1791 1792 /// See AbstractAttribute::trackStatistics() 1793 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 1794 }; 1795 1796 /// NonNull attribute for a call site return position. 1797 struct AANonNullCallSiteReturned final 1798 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 1799 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 1800 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 1801 1802 /// See AbstractAttribute::trackStatistics() 1803 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 1804 }; 1805 1806 /// ------------------------ No-Recurse Attributes ---------------------------- 1807 1808 struct AANoRecurseImpl : public AANoRecurse { 1809 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 1810 1811 /// See AbstractAttribute::getAsStr() 1812 const std::string getAsStr() const override { 1813 return getAssumed() ? "norecurse" : "may-recurse"; 1814 } 1815 }; 1816 1817 struct AANoRecurseFunction final : AANoRecurseImpl { 1818 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 1819 : AANoRecurseImpl(IRP, A) {} 1820 1821 /// See AbstractAttribute::initialize(...). 1822 void initialize(Attributor &A) override { 1823 AANoRecurseImpl::initialize(A); 1824 if (const Function *F = getAnchorScope()) 1825 if (A.getInfoCache().getSccSize(*F) != 1) 1826 indicatePessimisticFixpoint(); 1827 } 1828 1829 /// See AbstractAttribute::updateImpl(...). 1830 ChangeStatus updateImpl(Attributor &A) override { 1831 1832 // If all live call sites are known to be no-recurse, we are as well. 
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          /* TrackDependence */ false, DepClassTy::OPTIONAL);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (AllCallSitesKnown)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    // If the above check does not hold anymore we look at the calls.
    auto CheckForNoRecurse = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoRecurse))
        return true;

      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
      if (!NoRecurseAA.isAssumedNoRecurse())
        return false;

      // Recursion to the same function.
      if (CB.getCalledFunction() == getAnchorScope())
        return false;

      return true;
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// -------------------- Undefined-Behavior Attributes ------------------------

struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  // We look for UB in instructions that access memory through a pointer, in
  // conditional branches, and in call sites (see the inspection callbacks
  // below).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
        // TODO: Check also nopoison attribute if it is introduced.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        IRPosition CalleeArgumentIRP =
            IRPosition::argument(*Callee->getArg(idx));
        if (!CalleeArgumentIRP.hasAttr({Attribute::NoUndef}))
          continue;
        auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP);
        if (!NonNullAA.isKnownNonNull())
          continue;
        const auto &ValueSimplifyAA =
            A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*ArgVal));
        Optional<Value *> SimplifiedVal =
            ValueSimplifyAA.getAssumedSimplifiedValue(A);

        if (!ValueSimplifyAA.isKnown())
          continue;
        // Here, we handle three cases.
        // (1) Not having a value means it is dead. (we can replace the value
        //     with undef)
        // (2) Simplified to null pointer. The argument is a poison value and
        //     violates the noundef attribute.
        // (3) Simplified to undef. The argument violates the noundef
        //     attribute.
        if (!SimplifiedVal.hasValue() ||
            isa<ConstantPointerNull>(*SimplifiedVal.getValue()) ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          return true;
        }
      }
      return true;
    };

    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
    // those in the KnownUBInsts set). The rest is boilerplate to ensure that
    // it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and it is put
  ///    in the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during updates: if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
                                         Instruction *I) {
    const auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
    Optional<Value *> SimplifiedV =
        ValueSimplifyAA.getAssumedSimplifiedValue(A);
    if (!ValueSimplifyAA.isKnown()) {
      // Don't depend on assumed values.
      return llvm::None;
    }
    if (!SimplifiedV.hasValue()) {
      // If it is known (which we tested above) but it doesn't have a value,
      // then we can assume `undef` and hence the instruction is UB.
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    Value *Val = SimplifiedV.getValue();
    if (isa<UndefValue>(Val)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return Val;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle which we don't
// know if it is bounded or not.
// Loops with a maximum trip count are considered bounded; any other cycle is
// not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // conservatively assume any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    Function *F = getAnchorScope();
    if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
2267 ChangeStatus updateImpl(Attributor &A) override { 2268 // TODO: Once we have call site specific value information we can provide 2269 // call site specific liveness information and then it makes 2270 // sense to specialize attributes for call sites arguments instead of 2271 // redirecting requests to the callee argument. 2272 Function *F = getAssociatedFunction(); 2273 const IRPosition &FnPos = IRPosition::function(*F); 2274 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); 2275 return clampStateAndIndicateChange( 2276 getState(), 2277 static_cast<const AAWillReturn::StateType &>(FnAA.getState())); 2278 } 2279 2280 /// See AbstractAttribute::trackStatistics() 2281 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2282 }; 2283 2284 /// -------------------AAReachability Attribute-------------------------- 2285 2286 struct AAReachabilityImpl : AAReachability { 2287 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2288 : AAReachability(IRP, A) {} 2289 2290 const std::string getAsStr() const override { 2291 // TODO: Return the number of reachable queries. 2292 return "reachable"; 2293 } 2294 2295 /// See AbstractAttribute::initialize(...). 2296 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } 2297 2298 /// See AbstractAttribute::updateImpl(...). 2299 ChangeStatus updateImpl(Attributor &A) override { 2300 return indicatePessimisticFixpoint(); 2301 } 2302 }; 2303 2304 struct AAReachabilityFunction final : public AAReachabilityImpl { 2305 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2306 : AAReachabilityImpl(IRP, A) {} 2307 2308 /// See AbstractAttribute::trackStatistics() 2309 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2310 }; 2311 2312 /// ------------------------ NoAlias Argument Attribute ------------------------ 2313 2314 struct AANoAliasImpl : AANoAlias { 2315 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2316 assert(getAssociatedType()->isPointerTy() && 2317 "Noalias is a pointer attribute"); 2318 } 2319 2320 const std::string getAsStr() const override { 2321 return getAssumed() ? "noalias" : "may-alias"; 2322 } 2323 }; 2324 2325 /// NoAlias attribute for a floating value. 2326 struct AANoAliasFloating final : AANoAliasImpl { 2327 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 2328 : AANoAliasImpl(IRP, A) {} 2329 2330 /// See AbstractAttribute::initialize(...). 2331 void initialize(Attributor &A) override { 2332 AANoAliasImpl::initialize(A); 2333 Value *Val = &getAssociatedValue(); 2334 do { 2335 CastInst *CI = dyn_cast<CastInst>(Val); 2336 if (!CI) 2337 break; 2338 Value *Base = CI->getOperand(0); 2339 if (!Base->hasOneUse()) 2340 break; 2341 Val = Base; 2342 } while (true); 2343 2344 if (!Val->getType()->isPointerTy()) { 2345 indicatePessimisticFixpoint(); 2346 return; 2347 } 2348 2349 if (isa<AllocaInst>(Val)) 2350 indicateOptimisticFixpoint(); 2351 else if (isa<ConstantPointerNull>(Val) && 2352 !NullPointerIsDefined(getAnchorScope(), 2353 Val->getType()->getPointerAddressSpace())) 2354 indicateOptimisticFixpoint(); 2355 else if (Val != &getAssociatedValue()) { 2356 const auto &ValNoAliasAA = 2357 A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); 2358 if (ValNoAliasAA.isKnownNoAlias()) 2359 indicateOptimisticFixpoint(); 2360 } 2361 } 2362 2363 /// See AbstractAttribute::updateImpl(...). 2364 ChangeStatus updateImpl(Attributor &A) override { 2365 // TODO: Implement this. 
2366 return indicatePessimisticFixpoint(); 2367 } 2368 2369 /// See AbstractAttribute::trackStatistics() 2370 void trackStatistics() const override { 2371 STATS_DECLTRACK_FLOATING_ATTR(noalias) 2372 } 2373 }; 2374 2375 /// NoAlias attribute for an argument. 2376 struct AANoAliasArgument final 2377 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 2378 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 2379 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 2380 2381 /// See AbstractAttribute::initialize(...). 2382 void initialize(Attributor &A) override { 2383 Base::initialize(A); 2384 // See callsite argument attribute and callee argument attribute. 2385 if (hasAttr({Attribute::ByVal})) 2386 indicateOptimisticFixpoint(); 2387 } 2388 2389 /// See AbstractAttribute::update(...). 2390 ChangeStatus updateImpl(Attributor &A) override { 2391 // We have to make sure no-alias on the argument does not break 2392 // synchronization when this is a callback argument, see also [1] below. 2393 // If synchronization cannot be affected, we delegate to the base updateImpl 2394 // function, otherwise we give up for now. 2395 2396 // If the function is no-sync, no-alias cannot break synchronization. 2397 const auto &NoSyncAA = A.getAAFor<AANoSync>( 2398 *this, IRPosition::function_scope(getIRPosition())); 2399 if (NoSyncAA.isAssumedNoSync()) 2400 return Base::updateImpl(A); 2401 2402 // If the argument is read-only, no-alias cannot break synchronization. 2403 const auto &MemBehaviorAA = 2404 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 2405 if (MemBehaviorAA.isAssumedReadOnly()) 2406 return Base::updateImpl(A); 2407 2408 // If the argument is never passed through callbacks, no-alias cannot break 2409 // synchronization. 2410 bool AllCallSitesKnown; 2411 if (A.checkForAllCallSites( 2412 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 2413 true, AllCallSitesKnown)) 2414 return Base::updateImpl(A); 2415 2416 // TODO: add no-alias but make sure it doesn't break synchronization by 2417 // introducing fake uses. See: 2418 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, 2419 // International Workshop on OpenMP 2018, 2420 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 2421 2422 return indicatePessimisticFixpoint(); 2423 } 2424 2425 /// See AbstractAttribute::trackStatistics() 2426 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 2427 }; 2428 2429 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 2430 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 2431 : AANoAliasImpl(IRP, A) {} 2432 2433 /// See AbstractAttribute::initialize(...). 2434 void initialize(Attributor &A) override { 2435 // See callsite argument attribute and callee argument attribute. 2436 const auto &CB = cast<CallBase>(getAnchorValue()); 2437 if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias)) 2438 indicateOptimisticFixpoint(); 2439 Value &Val = getAssociatedValue(); 2440 if (isa<ConstantPointerNull>(Val) && 2441 !NullPointerIsDefined(getAnchorScope(), 2442 Val.getType()->getPointerAddressSpace())) 2443 indicateOptimisticFixpoint(); 2444 } 2445 2446 /// Determine if the underlying value may alias with the call site argument 2447 /// \p OtherArgNo of \p ICS (= the underlying call site). 
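  ///
  /// Note that only pairs where at least one side may be written through can
  /// create a problematic alias; readnone and readonly/readonly combinations
  /// are ruled out below before an actual AAResults query is issued.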
2448 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 2449 const AAMemoryBehavior &MemBehaviorAA, 2450 const CallBase &CB, unsigned OtherArgNo) { 2451 // We do not need to worry about aliasing with the underlying IRP. 2452 if (this->getArgNo() == (int)OtherArgNo) 2453 return false; 2454 2455 // If it is not a pointer or pointer vector we do not alias. 2456 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 2457 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 2458 return false; 2459 2460 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 2461 *this, IRPosition::callsite_argument(CB, OtherArgNo), 2462 /* TrackDependence */ false); 2463 2464 // If the argument is readnone, there is no read-write aliasing. 2465 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 2466 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2467 return false; 2468 } 2469 2470 // If the argument is readonly and the underlying value is readonly, there 2471 // is no read-write aliasing. 2472 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 2473 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 2474 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2475 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2476 return false; 2477 } 2478 2479 // We have to utilize actual alias analysis queries so we need the object. 2480 if (!AAR) 2481 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 2482 2483 // Try to rule it out at the call site. 2484 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 2485 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 2486 "callsite arguments: " 2487 << getAssociatedValue() << " " << *ArgOp << " => " 2488 << (IsAliasing ? "" : "no-") << "alias \n"); 2489 2490 return IsAliasing; 2491 } 2492 2493 bool 2494 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 2495 const AAMemoryBehavior &MemBehaviorAA, 2496 const AANoAlias &NoAliasAA) { 2497 // We can deduce "noalias" if the following conditions hold. 2498 // (i) Associated value is assumed to be noalias in the definition. 2499 // (ii) Associated value is assumed to be no-capture in all the uses 2500 // possibly executed before this callsite. 2501 // (iii) There is no other pointer argument which could alias with the 2502 // value. 2503 2504 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 2505 if (!AssociatedValueIsNoAliasAtDef) { 2506 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 2507 << " is not no-alias at the definition\n"); 2508 return false; 2509 } 2510 2511 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 2512 2513 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2514 auto &NoCaptureAA = 2515 A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false); 2516 // Check whether the value is captured in the scope using AANoCapture. 2517 // Look at CFG and check only uses possibly executed before this 2518 // callsite. 2519 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 2520 Instruction *UserI = cast<Instruction>(U.getUser()); 2521 2522 // If user if curr instr and only use. 
2523 if (UserI == getCtxI() && UserI->hasOneUse()) 2524 return true; 2525 2526 const Function *ScopeFn = VIRP.getAnchorScope(); 2527 if (ScopeFn) { 2528 const auto &ReachabilityAA = 2529 A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); 2530 2531 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 2532 return true; 2533 2534 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2535 if (CB->isArgOperand(&U)) { 2536 2537 unsigned ArgNo = CB->getArgOperandNo(&U); 2538 2539 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 2540 *this, IRPosition::callsite_argument(*CB, ArgNo)); 2541 2542 if (NoCaptureAA.isAssumedNoCapture()) 2543 return true; 2544 } 2545 } 2546 } 2547 2548 // For cases which can potentially have more users 2549 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 2550 isa<SelectInst>(U)) { 2551 Follow = true; 2552 return true; 2553 } 2554 2555 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 2556 return false; 2557 }; 2558 2559 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 2560 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 2561 LLVM_DEBUG( 2562 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 2563 << " cannot be noalias as it is potentially captured\n"); 2564 return false; 2565 } 2566 } 2567 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 2568 2569 // Check there is no other pointer argument which could alias with the 2570 // value passed at this call site. 2571 // TODO: AbstractCallSite 2572 const auto &CB = cast<CallBase>(getAnchorValue()); 2573 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); 2574 OtherArgNo++) 2575 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 2576 return false; 2577 2578 return true; 2579 } 2580 2581 /// See AbstractAttribute::updateImpl(...). 2582 ChangeStatus updateImpl(Attributor &A) override { 2583 // If the argument is readnone we are done as there are no accesses via the 2584 // argument. 2585 auto &MemBehaviorAA = 2586 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 2587 /* TrackDependence */ false); 2588 if (MemBehaviorAA.isAssumedReadNone()) { 2589 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2590 return ChangeStatus::UNCHANGED; 2591 } 2592 2593 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2594 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, 2595 /* TrackDependence */ false); 2596 2597 AAResults *AAR = nullptr; 2598 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 2599 NoAliasAA)) { 2600 LLVM_DEBUG( 2601 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 2602 return ChangeStatus::UNCHANGED; 2603 } 2604 2605 return indicatePessimisticFixpoint(); 2606 } 2607 2608 /// See AbstractAttribute::trackStatistics() 2609 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 2610 }; 2611 2612 /// NoAlias attribute for function return value. 2613 struct AANoAliasReturned final : AANoAliasImpl { 2614 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 2615 : AANoAliasImpl(IRP, A) {} 2616 2617 /// See AbstractAttribute::updateImpl(...). 2618 virtual ChangeStatus updateImpl(Attributor &A) override { 2619 2620 auto CheckReturnValue = [&](Value &RV) -> bool { 2621 if (Constant *C = dyn_cast<Constant>(&RV)) 2622 if (C->isNullValue() || isa<UndefValue>(C)) 2623 return true; 2624 2625 /// For now, we can only deduce noalias if we have call sites. 2626 /// FIXME: add more support. 
2627 if (!isa<CallBase>(&RV)) 2628 return false; 2629 2630 const IRPosition &RVPos = IRPosition::value(RV); 2631 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); 2632 if (!NoAliasAA.isAssumedNoAlias()) 2633 return false; 2634 2635 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); 2636 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 2637 }; 2638 2639 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 2640 return indicatePessimisticFixpoint(); 2641 2642 return ChangeStatus::UNCHANGED; 2643 } 2644 2645 /// See AbstractAttribute::trackStatistics() 2646 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 2647 }; 2648 2649 /// NoAlias attribute deduction for a call site return value. 2650 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 2651 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 2652 : AANoAliasImpl(IRP, A) {} 2653 2654 /// See AbstractAttribute::initialize(...). 2655 void initialize(Attributor &A) override { 2656 AANoAliasImpl::initialize(A); 2657 Function *F = getAssociatedFunction(); 2658 if (!F) 2659 indicatePessimisticFixpoint(); 2660 } 2661 2662 /// See AbstractAttribute::updateImpl(...). 2663 ChangeStatus updateImpl(Attributor &A) override { 2664 // TODO: Once we have call site specific value information we can provide 2665 // call site specific liveness information and then it makes 2666 // sense to specialize attributes for call sites arguments instead of 2667 // redirecting requests to the callee argument. 2668 Function *F = getAssociatedFunction(); 2669 const IRPosition &FnPos = IRPosition::returned(*F); 2670 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); 2671 return clampStateAndIndicateChange( 2672 getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState())); 2673 } 2674 2675 /// See AbstractAttribute::trackStatistics() 2676 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 2677 }; 2678 2679 /// -------------------AAIsDead Function Attribute----------------------- 2680 2681 struct AAIsDeadValueImpl : public AAIsDead { 2682 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2683 2684 /// See AAIsDead::isAssumedDead(). 2685 bool isAssumedDead() const override { return getAssumed(); } 2686 2687 /// See AAIsDead::isKnownDead(). 2688 bool isKnownDead() const override { return getKnown(); } 2689 2690 /// See AAIsDead::isAssumedDead(BasicBlock *). 2691 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 2692 2693 /// See AAIsDead::isKnownDead(BasicBlock *). 2694 bool isKnownDead(const BasicBlock *BB) const override { return false; } 2695 2696 /// See AAIsDead::isAssumedDead(Instruction *I). 2697 bool isAssumedDead(const Instruction *I) const override { 2698 return I == getCtxI() && isAssumedDead(); 2699 } 2700 2701 /// See AAIsDead::isKnownDead(Instruction *I). 2702 bool isKnownDead(const Instruction *I) const override { 2703 return isAssumedDead(I) && getKnown(); 2704 } 2705 2706 /// See AbstractAttribute::getAsStr(). 2707 const std::string getAsStr() const override { 2708 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 2709 } 2710 2711 /// Check if all uses are assumed dead. 
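  ///
  /// This is done by passing a use predicate that rejects every use it is
  /// actually asked about; the checkForAllUses traversal then only returns
  /// true if every use of \p V was already skipped as (assumed) dead.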
2712 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 2713 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 2714 // Explicitly set the dependence class to required because we want a long 2715 // chain of N dependent instructions to be considered live as soon as one is 2716 // without going through N update cycles. This is not required for 2717 // correctness. 2718 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); 2719 } 2720 2721 /// Determine if \p I is assumed to be side-effect free. 2722 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 2723 if (!I || wouldInstructionBeTriviallyDead(I)) 2724 return true; 2725 2726 auto *CB = dyn_cast<CallBase>(I); 2727 if (!CB || isa<IntrinsicInst>(CB)) 2728 return false; 2729 2730 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 2731 const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( 2732 *this, CallIRP, /* TrackDependence */ false); 2733 if (!NoUnwindAA.isAssumedNoUnwind()) 2734 return false; 2735 if (!NoUnwindAA.isKnownNoUnwind()) 2736 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 2737 2738 const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( 2739 *this, CallIRP, /* TrackDependence */ false); 2740 if (MemBehaviorAA.isAssumedReadOnly()) { 2741 if (!MemBehaviorAA.isKnownReadOnly()) 2742 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2743 return true; 2744 } 2745 return false; 2746 } 2747 }; 2748 2749 struct AAIsDeadFloating : public AAIsDeadValueImpl { 2750 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 2751 : AAIsDeadValueImpl(IRP, A) {} 2752 2753 /// See AbstractAttribute::initialize(...). 2754 void initialize(Attributor &A) override { 2755 if (isa<UndefValue>(getAssociatedValue())) { 2756 indicatePessimisticFixpoint(); 2757 return; 2758 } 2759 2760 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2761 if (!isAssumedSideEffectFree(A, I)) 2762 indicatePessimisticFixpoint(); 2763 } 2764 2765 /// See AbstractAttribute::updateImpl(...). 2766 ChangeStatus updateImpl(Attributor &A) override { 2767 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2768 if (!isAssumedSideEffectFree(A, I)) 2769 return indicatePessimisticFixpoint(); 2770 2771 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2772 return indicatePessimisticFixpoint(); 2773 return ChangeStatus::UNCHANGED; 2774 } 2775 2776 /// See AbstractAttribute::manifest(...). 2777 ChangeStatus manifest(Attributor &A) override { 2778 Value &V = getAssociatedValue(); 2779 if (auto *I = dyn_cast<Instruction>(&V)) { 2780 // If we get here we basically know the users are all dead. We check if 2781 // isAssumedSideEffectFree returns true here again because it might not be 2782 // the case and only the users are dead but the instruction (=call) is 2783 // still needed. 2784 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { 2785 A.deleteAfterManifest(*I); 2786 return ChangeStatus::CHANGED; 2787 } 2788 } 2789 if (V.use_empty()) 2790 return ChangeStatus::UNCHANGED; 2791 2792 bool UsedAssumedInformation = false; 2793 Optional<Constant *> C = 2794 A.getAssumedConstant(V, *this, UsedAssumedInformation); 2795 if (C.hasValue() && C.getValue()) 2796 return ChangeStatus::UNCHANGED; 2797 2798 // Replace the value with undef as it is dead but keep droppable uses around 2799 // as they provide information we don't want to give up on just yet. 
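    // Droppable uses are, e.g., uses in llvm.assume; the last argument of
    // changeValueAfterManifest below is false so those uses are kept and the
    // knowledge they encode stays available, while all other users see undef.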
2800 UndefValue &UV = *UndefValue::get(V.getType()); 2801 bool AnyChange = 2802 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); 2803 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2804 } 2805 2806 /// See AbstractAttribute::trackStatistics() 2807 void trackStatistics() const override { 2808 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 2809 } 2810 }; 2811 2812 struct AAIsDeadArgument : public AAIsDeadFloating { 2813 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 2814 : AAIsDeadFloating(IRP, A) {} 2815 2816 /// See AbstractAttribute::initialize(...). 2817 void initialize(Attributor &A) override { 2818 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 2819 indicatePessimisticFixpoint(); 2820 } 2821 2822 /// See AbstractAttribute::manifest(...). 2823 ChangeStatus manifest(Attributor &A) override { 2824 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 2825 Argument &Arg = *getAssociatedArgument(); 2826 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 2827 if (A.registerFunctionSignatureRewrite( 2828 Arg, /* ReplacementTypes */ {}, 2829 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 2830 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 2831 Arg.dropDroppableUses(); 2832 return ChangeStatus::CHANGED; 2833 } 2834 return Changed; 2835 } 2836 2837 /// See AbstractAttribute::trackStatistics() 2838 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 2839 }; 2840 2841 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 2842 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 2843 : AAIsDeadValueImpl(IRP, A) {} 2844 2845 /// See AbstractAttribute::initialize(...). 2846 void initialize(Attributor &A) override { 2847 if (isa<UndefValue>(getAssociatedValue())) 2848 indicatePessimisticFixpoint(); 2849 } 2850 2851 /// See AbstractAttribute::updateImpl(...). 2852 ChangeStatus updateImpl(Attributor &A) override { 2853 // TODO: Once we have call site specific value information we can provide 2854 // call site specific liveness information and then it makes 2855 // sense to specialize attributes for call sites arguments instead of 2856 // redirecting requests to the callee argument. 2857 Argument *Arg = getAssociatedArgument(); 2858 if (!Arg) 2859 return indicatePessimisticFixpoint(); 2860 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2861 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); 2862 return clampStateAndIndicateChange( 2863 getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState())); 2864 } 2865 2866 /// See AbstractAttribute::manifest(...). 2867 ChangeStatus manifest(Attributor &A) override { 2868 CallBase &CB = cast<CallBase>(getAnchorValue()); 2869 Use &U = CB.getArgOperandUse(getArgNo()); 2870 assert(!isa<UndefValue>(U.get()) && 2871 "Expected undef values to be filtered out!"); 2872 UndefValue &UV = *UndefValue::get(U->getType()); 2873 if (A.changeUseAfterManifest(U, UV)) 2874 return ChangeStatus::CHANGED; 2875 return ChangeStatus::UNCHANGED; 2876 } 2877 2878 /// See AbstractAttribute::trackStatistics() 2879 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 2880 }; 2881 2882 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 2883 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 2884 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 2885 2886 /// See AAIsDead::isAssumedDead(). 
2887 bool isAssumedDead() const override { 2888 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 2889 } 2890 2891 /// See AbstractAttribute::initialize(...). 2892 void initialize(Attributor &A) override { 2893 if (isa<UndefValue>(getAssociatedValue())) { 2894 indicatePessimisticFixpoint(); 2895 return; 2896 } 2897 2898 // We track this separately as a secondary state. 2899 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 2900 } 2901 2902 /// See AbstractAttribute::updateImpl(...). 2903 ChangeStatus updateImpl(Attributor &A) override { 2904 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2905 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 2906 IsAssumedSideEffectFree = false; 2907 Changed = ChangeStatus::CHANGED; 2908 } 2909 2910 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2911 return indicatePessimisticFixpoint(); 2912 return Changed; 2913 } 2914 2915 /// See AbstractAttribute::trackStatistics() 2916 void trackStatistics() const override { 2917 if (IsAssumedSideEffectFree) 2918 STATS_DECLTRACK_CSRET_ATTR(IsDead) 2919 else 2920 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 2921 } 2922 2923 /// See AbstractAttribute::getAsStr(). 2924 const std::string getAsStr() const override { 2925 return isAssumedDead() 2926 ? "assumed-dead" 2927 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 2928 } 2929 2930 private: 2931 bool IsAssumedSideEffectFree; 2932 }; 2933 2934 struct AAIsDeadReturned : public AAIsDeadValueImpl { 2935 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 2936 : AAIsDeadValueImpl(IRP, A) {} 2937 2938 /// See AbstractAttribute::updateImpl(...). 2939 ChangeStatus updateImpl(Attributor &A) override { 2940 2941 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 2942 {Instruction::Ret}); 2943 2944 auto PredForCallSite = [&](AbstractCallSite ACS) { 2945 if (ACS.isCallbackCall() || !ACS.getInstruction()) 2946 return false; 2947 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 2948 }; 2949 2950 bool AllCallSitesKnown; 2951 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 2952 AllCallSitesKnown)) 2953 return indicatePessimisticFixpoint(); 2954 2955 return ChangeStatus::UNCHANGED; 2956 } 2957 2958 /// See AbstractAttribute::manifest(...). 2959 ChangeStatus manifest(Attributor &A) override { 2960 // TODO: Rewrite the signature to return void? 2961 bool AnyChange = false; 2962 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 2963 auto RetInstPred = [&](Instruction &I) { 2964 ReturnInst &RI = cast<ReturnInst>(I); 2965 if (!isa<UndefValue>(RI.getReturnValue())) 2966 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 2967 return true; 2968 }; 2969 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); 2970 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2971 } 2972 2973 /// See AbstractAttribute::trackStatistics() 2974 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 2975 }; 2976 2977 struct AAIsDeadFunction : public AAIsDead { 2978 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2979 2980 /// See AbstractAttribute::initialize(...). 2981 void initialize(Attributor &A) override { 2982 const Function *F = getAnchorScope(); 2983 if (F && !F->isDeclaration()) { 2984 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 2985 assumeLive(A, F->getEntryBlock()); 2986 } 2987 } 2988 2989 /// See AbstractAttribute::getAsStr(). 
2990 const std::string getAsStr() const override { 2991 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 2992 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 2993 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 2994 std::to_string(KnownDeadEnds.size()) + "]"; 2995 } 2996 2997 /// See AbstractAttribute::manifest(...). 2998 ChangeStatus manifest(Attributor &A) override { 2999 assert(getState().isValidState() && 3000 "Attempted to manifest an invalid state!"); 3001 3002 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3003 Function &F = *getAnchorScope(); 3004 3005 if (AssumedLiveBlocks.empty()) { 3006 A.deleteAfterManifest(F); 3007 return ChangeStatus::CHANGED; 3008 } 3009 3010 // Flag to determine if we can change an invoke to a call assuming the 3011 // callee is nounwind. This is not possible if the personality of the 3012 // function allows to catch asynchronous exceptions. 3013 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3014 3015 KnownDeadEnds.set_union(ToBeExploredFrom); 3016 for (const Instruction *DeadEndI : KnownDeadEnds) { 3017 auto *CB = dyn_cast<CallBase>(DeadEndI); 3018 if (!CB) 3019 continue; 3020 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3021 *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, 3022 DepClassTy::OPTIONAL); 3023 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3024 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3025 continue; 3026 3027 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3028 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3029 else 3030 A.changeToUnreachableAfterManifest( 3031 const_cast<Instruction *>(DeadEndI->getNextNode())); 3032 HasChanged = ChangeStatus::CHANGED; 3033 } 3034 3035 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3036 for (BasicBlock &BB : F) 3037 if (!AssumedLiveBlocks.count(&BB)) { 3038 A.deleteAfterManifest(BB); 3039 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3040 } 3041 3042 return HasChanged; 3043 } 3044 3045 /// See AbstractAttribute::updateImpl(...). 3046 ChangeStatus updateImpl(Attributor &A) override; 3047 3048 /// See AbstractAttribute::trackStatistics() 3049 void trackStatistics() const override {} 3050 3051 /// Returns true if the function is assumed dead. 3052 bool isAssumedDead() const override { return false; } 3053 3054 /// See AAIsDead::isKnownDead(). 3055 bool isKnownDead() const override { return false; } 3056 3057 /// See AAIsDead::isAssumedDead(BasicBlock *). 3058 bool isAssumedDead(const BasicBlock *BB) const override { 3059 assert(BB->getParent() == getAnchorScope() && 3060 "BB must be in the same anchor scope function."); 3061 3062 if (!getAssumed()) 3063 return false; 3064 return !AssumedLiveBlocks.count(BB); 3065 } 3066 3067 /// See AAIsDead::isKnownDead(BasicBlock *). 3068 bool isKnownDead(const BasicBlock *BB) const override { 3069 return getKnown() && isAssumedDead(BB); 3070 } 3071 3072 /// See AAIsDead::isAssumed(Instruction *I). 3073 bool isAssumedDead(const Instruction *I) const override { 3074 assert(I->getParent()->getParent() == getAnchorScope() && 3075 "Instruction must be in the same anchor scope function."); 3076 3077 if (!getAssumed()) 3078 return false; 3079 3080 // If it is not in AssumedLiveBlocks then it for sure dead. 3081 // Otherwise, it can still be after noreturn call in a live block. 3082 if (!AssumedLiveBlocks.count(I->getParent())) 3083 return true; 3084 3085 // If it is not after a liveness barrier it is live. 
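    // Walk backwards over the preceding instructions in the block; if any of
    // them is a known dead end or a pending exploration point, control is not
    // assumed to reach \p I and it is treated as dead.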
3086 const Instruction *PrevI = I->getPrevNode(); 3087 while (PrevI) { 3088 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3089 return true; 3090 PrevI = PrevI->getPrevNode(); 3091 } 3092 return false; 3093 } 3094 3095 /// See AAIsDead::isKnownDead(Instruction *I). 3096 bool isKnownDead(const Instruction *I) const override { 3097 return getKnown() && isAssumedDead(I); 3098 } 3099 3100 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3101 /// that internal function called from \p BB should now be looked at. 3102 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3103 if (!AssumedLiveBlocks.insert(&BB).second) 3104 return false; 3105 3106 // We assume that all of BB is (probably) live now and if there are calls to 3107 // internal functions we will assume that those are now live as well. This 3108 // is a performance optimization for blocks with calls to a lot of internal 3109 // functions. It can however cause dead functions to be treated as live. 3110 for (const Instruction &I : BB) 3111 if (const auto *CB = dyn_cast<CallBase>(&I)) 3112 if (const Function *F = CB->getCalledFunction()) 3113 if (F->hasLocalLinkage()) 3114 A.markLiveInternalFunction(*F); 3115 return true; 3116 } 3117 3118 /// Collection of instructions that need to be explored again, e.g., we 3119 /// did assume they do not transfer control to (one of their) successors. 3120 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3121 3122 /// Collection of instructions that are known to not transfer control. 3123 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3124 3125 /// Collection of all assumed live BasicBlocks. 3126 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3127 }; 3128 3129 static bool 3130 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3131 AbstractAttribute &AA, 3132 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3133 const IRPosition &IPos = IRPosition::callsite_function(CB); 3134 3135 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3136 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3137 if (NoReturnAA.isAssumedNoReturn()) 3138 return !NoReturnAA.isKnownNoReturn(); 3139 if (CB.isTerminator()) 3140 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3141 else 3142 AliveSuccessors.push_back(CB.getNextNode()); 3143 return false; 3144 } 3145 3146 static bool 3147 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3148 AbstractAttribute &AA, 3149 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3150 bool UsedAssumedInformation = 3151 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3152 3153 // First, determine if we can change an invoke to a call assuming the 3154 // callee is nounwind. This is not possible if the personality of the 3155 // function allows to catch asynchronous exceptions. 
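  // Illustrative example (hypothetical callee): for
  //   invoke void @nounwind_callee() to label %normal unwind label %lpad
  // an assumed nounwind callee lets us treat %lpad as dead (and potentially
  // rewrite the invoke into a call during manifest), unless the personality
  // may catch asynchronous exceptions, in which case %lpad must stay
  // assumed-live.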
3156 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3157 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3158 } else { 3159 const IRPosition &IPos = IRPosition::callsite_function(II); 3160 const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( 3161 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3162 if (AANoUnw.isAssumedNoUnwind()) { 3163 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3164 } else { 3165 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3166 } 3167 } 3168 return UsedAssumedInformation; 3169 } 3170 3171 static bool 3172 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3173 AbstractAttribute &AA, 3174 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3175 bool UsedAssumedInformation = false; 3176 if (BI.getNumSuccessors() == 1) { 3177 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3178 } else { 3179 Optional<ConstantInt *> CI = getAssumedConstantInt( 3180 A, *BI.getCondition(), AA, UsedAssumedInformation); 3181 if (!CI.hasValue()) { 3182 // No value yet, assume both edges are dead. 3183 } else if (CI.getValue()) { 3184 const BasicBlock *SuccBB = 3185 BI.getSuccessor(1 - CI.getValue()->getZExtValue()); 3186 AliveSuccessors.push_back(&SuccBB->front()); 3187 } else { 3188 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3189 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3190 UsedAssumedInformation = false; 3191 } 3192 } 3193 return UsedAssumedInformation; 3194 } 3195 3196 static bool 3197 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3198 AbstractAttribute &AA, 3199 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3200 bool UsedAssumedInformation = false; 3201 Optional<ConstantInt *> CI = 3202 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); 3203 if (!CI.hasValue()) { 3204 // No value yet, assume all edges are dead. 3205 } else if (CI.getValue()) { 3206 for (auto &CaseIt : SI.cases()) { 3207 if (CaseIt.getCaseValue() == CI.getValue()) { 3208 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3209 return UsedAssumedInformation; 3210 } 3211 } 3212 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3213 return UsedAssumedInformation; 3214 } else { 3215 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3216 AliveSuccessors.push_back(&SuccBB->front()); 3217 } 3218 return UsedAssumedInformation; 3219 } 3220 3221 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3222 ChangeStatus Change = ChangeStatus::UNCHANGED; 3223 3224 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3225 << getAnchorScope()->size() << "] BBs and " 3226 << ToBeExploredFrom.size() << " exploration points and " 3227 << KnownDeadEnds.size() << " known dead ends\n"); 3228 3229 // Copy and clear the list of instructions we need to explore from. It is 3230 // refilled with instructions the next update has to look at. 3231 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3232 ToBeExploredFrom.end()); 3233 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3234 3235 SmallVector<const Instruction *, 8> AliveSuccessors; 3236 while (!Worklist.empty()) { 3237 const Instruction *I = Worklist.pop_back_val(); 3238 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3239 3240 AliveSuccessors.clear(); 3241 3242 bool UsedAssumedInformation = false; 3243 switch (I->getOpcode()) { 3244 // TODO: look for (assumed) UB to backwards propagate "deadness". 
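    // Sketch of the dispatch below (illustrative IR): for
    //   br i1 %c, label %then, label %else
    // with %c assumed to be constant `true`, only the front of %then is
    // recorded as an alive successor; %else stays outside AssumedLiveBlocks
    // until some other path makes it live.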
3245 default: 3246 if (I->isTerminator()) { 3247 for (const BasicBlock *SuccBB : successors(I->getParent())) 3248 AliveSuccessors.push_back(&SuccBB->front()); 3249 } else { 3250 AliveSuccessors.push_back(I->getNextNode()); 3251 } 3252 break; 3253 case Instruction::Call: 3254 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I), 3255 *this, AliveSuccessors); 3256 break; 3257 case Instruction::Invoke: 3258 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I), 3259 *this, AliveSuccessors); 3260 break; 3261 case Instruction::Br: 3262 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I), 3263 *this, AliveSuccessors); 3264 break; 3265 case Instruction::Switch: 3266 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I), 3267 *this, AliveSuccessors); 3268 break; 3269 } 3270 3271 if (UsedAssumedInformation) { 3272 NewToBeExploredFrom.insert(I); 3273 } else { 3274 Change = ChangeStatus::CHANGED; 3275 if (AliveSuccessors.empty() || 3276 (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors())) 3277 KnownDeadEnds.insert(I); 3278 } 3279 3280 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: " 3281 << AliveSuccessors.size() << " UsedAssumedInformation: " 3282 << UsedAssumedInformation << "\n"); 3283 3284 for (const Instruction *AliveSuccessor : AliveSuccessors) { 3285 if (!I->isTerminator()) { 3286 assert(AliveSuccessors.size() == 1 && 3287 "Non-terminator expected to have a single successor!"); 3288 Worklist.push_back(AliveSuccessor); 3289 } else { 3290 if (assumeLive(A, *AliveSuccessor->getParent())) 3291 Worklist.push_back(AliveSuccessor); 3292 } 3293 } 3294 } 3295 3296 ToBeExploredFrom = std::move(NewToBeExploredFrom); 3297 3298 // If we know everything is live there is no need to query for liveness. 3299 // Instead, indicating a pessimistic fixpoint will cause the state to be 3300 // "invalid" and all queries to be answered conservatively without lookups. 3301 // To be in this state we have to (1) finished the exploration and (3) not 3302 // discovered any non-trivial dead end and (2) not ruled unreachable code 3303 // dead. 3304 if (ToBeExploredFrom.empty() && 3305 getAnchorScope()->size() == AssumedLiveBlocks.size() && 3306 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) { 3307 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0; 3308 })) 3309 return indicatePessimisticFixpoint(); 3310 return Change; 3311 } 3312 3313 /// Liveness information for a call sites. 3314 struct AAIsDeadCallSite final : AAIsDeadFunction { 3315 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) 3316 : AAIsDeadFunction(IRP, A) {} 3317 3318 /// See AbstractAttribute::initialize(...). 3319 void initialize(Attributor &A) override { 3320 // TODO: Once we have call site specific value information we can provide 3321 // call site specific liveness information and then it makes 3322 // sense to specialize attributes for call sites instead of 3323 // redirecting requests to the callee. 3324 llvm_unreachable("Abstract attributes for liveness are not " 3325 "supported for call sites yet!"); 3326 } 3327 3328 /// See AbstractAttribute::updateImpl(...). 
3329 ChangeStatus updateImpl(Attributor &A) override { 3330 return indicatePessimisticFixpoint(); 3331 } 3332 3333 /// See AbstractAttribute::trackStatistics() 3334 void trackStatistics() const override {} 3335 }; 3336 3337 /// -------------------- Dereferenceable Argument Attribute -------------------- 3338 3339 template <> 3340 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 3341 const DerefState &R) { 3342 ChangeStatus CS0 = 3343 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 3344 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 3345 return CS0 | CS1; 3346 } 3347 3348 struct AADereferenceableImpl : AADereferenceable { 3349 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 3350 : AADereferenceable(IRP, A) {} 3351 using StateType = DerefState; 3352 3353 /// See AbstractAttribute::initialize(...). 3354 void initialize(Attributor &A) override { 3355 SmallVector<Attribute, 4> Attrs; 3356 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 3357 Attrs, /* IgnoreSubsumingPositions */ false, &A); 3358 for (const Attribute &Attr : Attrs) 3359 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 3360 3361 const IRPosition &IRP = this->getIRPosition(); 3362 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, 3363 /* TrackDependence */ false); 3364 3365 bool CanBeNull; 3366 takeKnownDerefBytesMaximum( 3367 IRP.getAssociatedValue().getPointerDereferenceableBytes( 3368 A.getDataLayout(), CanBeNull)); 3369 3370 bool IsFnInterface = IRP.isFnInterfaceKind(); 3371 Function *FnScope = IRP.getAnchorScope(); 3372 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 3373 indicatePessimisticFixpoint(); 3374 return; 3375 } 3376 3377 if (Instruction *CtxI = getCtxI()) 3378 followUsesInMBEC(*this, A, getState(), *CtxI); 3379 } 3380 3381 /// See AbstractAttribute::getState() 3382 /// { 3383 StateType &getState() override { return *this; } 3384 const StateType &getState() const override { return *this; } 3385 /// } 3386 3387 /// Helper function for collecting accessed bytes in must-be-executed-context 3388 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 3389 DerefState &State) { 3390 const Value *UseV = U->get(); 3391 if (!UseV->getType()->isPointerTy()) 3392 return; 3393 3394 Type *PtrTy = UseV->getType(); 3395 const DataLayout &DL = A.getDataLayout(); 3396 int64_t Offset; 3397 if (const Value *Base = getBasePointerOfAccessPointerOperand( 3398 I, Offset, DL, /*AllowNonInbounds*/ true)) { 3399 if (Base == &getAssociatedValue() && 3400 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 3401 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 3402 State.addAccessedBytes(Offset, Size); 3403 } 3404 } 3405 return; 3406 } 3407 3408 /// See followUsesInMBEC 3409 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3410 AADereferenceable::StateType &State) { 3411 bool IsNonNull = false; 3412 bool TrackUse = false; 3413 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 3414 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 3415 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 3416 << " for instruction " << *I << "\n"); 3417 3418 addAccessedBytesForUse(A, U, I, State); 3419 State.takeKnownDerefBytesMaximum(DerefBytes); 3420 return TrackUse; 3421 } 3422 3423 /// See AbstractAttribute::manifest(...). 
3424 ChangeStatus manifest(Attributor &A) override { 3425 ChangeStatus Change = AADereferenceable::manifest(A); 3426 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 3427 removeAttrs({Attribute::DereferenceableOrNull}); 3428 return ChangeStatus::CHANGED; 3429 } 3430 return Change; 3431 } 3432 3433 void getDeducedAttributes(LLVMContext &Ctx, 3434 SmallVectorImpl<Attribute> &Attrs) const override { 3435 // TODO: Add *_globally support 3436 if (isAssumedNonNull()) 3437 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 3438 Ctx, getAssumedDereferenceableBytes())); 3439 else 3440 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 3441 Ctx, getAssumedDereferenceableBytes())); 3442 } 3443 3444 /// See AbstractAttribute::getAsStr(). 3445 const std::string getAsStr() const override { 3446 if (!getAssumedDereferenceableBytes()) 3447 return "unknown-dereferenceable"; 3448 return std::string("dereferenceable") + 3449 (isAssumedNonNull() ? "" : "_or_null") + 3450 (isAssumedGlobal() ? "_globally" : "") + "<" + 3451 std::to_string(getKnownDereferenceableBytes()) + "-" + 3452 std::to_string(getAssumedDereferenceableBytes()) + ">"; 3453 } 3454 }; 3455 3456 /// Dereferenceable attribute for a floating value. 3457 struct AADereferenceableFloating : AADereferenceableImpl { 3458 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 3459 : AADereferenceableImpl(IRP, A) {} 3460 3461 /// See AbstractAttribute::updateImpl(...). 3462 ChangeStatus updateImpl(Attributor &A) override { 3463 const DataLayout &DL = A.getDataLayout(); 3464 3465 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 3466 bool Stripped) -> bool { 3467 unsigned IdxWidth = 3468 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 3469 APInt Offset(IdxWidth, 0); 3470 const Value *Base = 3471 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false); 3472 3473 const auto &AA = 3474 A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base)); 3475 int64_t DerefBytes = 0; 3476 if (!Stripped && this == &AA) { 3477 // Use IR information if we did not strip anything. 3478 // TODO: track globally. 3479 bool CanBeNull; 3480 DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); 3481 T.GlobalState.indicatePessimisticFixpoint(); 3482 } else { 3483 const DerefState &DS = static_cast<const DerefState &>(AA.getState()); 3484 DerefBytes = DS.DerefBytesState.getAssumed(); 3485 T.GlobalState &= DS.GlobalState; 3486 } 3487 3488 // For now we do not try to "increase" dereferenceability due to negative 3489 // indices as we first have to come up with code to deal with loops and 3490 // for overflows of the dereferenceable bytes. 3491 int64_t OffsetSExt = Offset.getSExtValue(); 3492 if (OffsetSExt < 0) 3493 OffsetSExt = 0; 3494 3495 T.takeAssumedDerefBytesMinimum( 3496 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3497 3498 if (this == &AA) { 3499 if (!Stripped) { 3500 // If nothing was stripped IR information is all we got. 3501 T.takeKnownDerefBytesMaximum( 3502 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3503 T.indicatePessimisticFixpoint(); 3504 } else if (OffsetSExt > 0) { 3505 // If something was stripped but there is circular reasoning we look 3506 // for the offset. If it is positive we basically decrease the 3507 // dereferenceable bytes in a circluar loop now, which will simply 3508 // drive them down to the known value in a very slow way which we 3509 // can accelerate. 
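          // Worked example (illustrative numbers): with an assumed
          // dereferenceability of 16 bytes and a positive accumulated offset
          // of 4, each round would only lower the assumed bytes to
          // max(0, 16 - 4) = 12, then 8, and so on; collapsing to the known
          // value here short-circuits that slow descent.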
3510 T.indicatePessimisticFixpoint(); 3511 } 3512 } 3513 3514 return T.isValidState(); 3515 }; 3516 3517 DerefState T; 3518 if (!genericValueTraversal<AADereferenceable, DerefState>( 3519 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 3520 return indicatePessimisticFixpoint(); 3521 3522 return clampStateAndIndicateChange(getState(), T); 3523 } 3524 3525 /// See AbstractAttribute::trackStatistics() 3526 void trackStatistics() const override { 3527 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 3528 } 3529 }; 3530 3531 /// Dereferenceable attribute for a return value. 3532 struct AADereferenceableReturned final 3533 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 3534 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 3535 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 3536 IRP, A) {} 3537 3538 /// See AbstractAttribute::trackStatistics() 3539 void trackStatistics() const override { 3540 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 3541 } 3542 }; 3543 3544 /// Dereferenceable attribute for an argument 3545 struct AADereferenceableArgument final 3546 : AAArgumentFromCallSiteArguments<AADereferenceable, 3547 AADereferenceableImpl> { 3548 using Base = 3549 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 3550 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 3551 : Base(IRP, A) {} 3552 3553 /// See AbstractAttribute::trackStatistics() 3554 void trackStatistics() const override { 3555 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 3556 } 3557 }; 3558 3559 /// Dereferenceable attribute for a call site argument. 3560 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 3561 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 3562 : AADereferenceableFloating(IRP, A) {} 3563 3564 /// See AbstractAttribute::trackStatistics() 3565 void trackStatistics() const override { 3566 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 3567 } 3568 }; 3569 3570 /// Dereferenceable attribute deduction for a call site return value. 3571 struct AADereferenceableCallSiteReturned final 3572 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 3573 using Base = 3574 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 3575 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 3576 : Base(IRP, A) {} 3577 3578 /// See AbstractAttribute::trackStatistics() 3579 void trackStatistics() const override { 3580 STATS_DECLTRACK_CS_ATTR(dereferenceable); 3581 } 3582 }; 3583 3584 // ------------------------ Align Argument Attribute ------------------------ 3585 3586 static unsigned getKnownAlignForUse(Attributor &A, 3587 AbstractAttribute &QueryingAA, 3588 Value &AssociatedValue, const Use *U, 3589 const Instruction *I, bool &TrackUse) { 3590 // We need to follow common pointer manipulation uses to the accesses they 3591 // feed into. 3592 if (isa<CastInst>(I)) { 3593 // Follow all but ptr2int casts. 
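    // E.g., for `%i = ptrtoint i8* %p to i64` (illustrative) we stop here;
    // uses of %i are not inspected to derive an alignment for %p.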
3594 TrackUse = !isa<PtrToIntInst>(I); 3595 return 0; 3596 } 3597 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3598 if (GEP->hasAllConstantIndices()) { 3599 TrackUse = true; 3600 return 0; 3601 } 3602 } 3603 3604 MaybeAlign MA; 3605 if (const auto *CB = dyn_cast<CallBase>(I)) { 3606 if (CB->isBundleOperand(U) || CB->isCallee(U)) 3607 return 0; 3608 3609 unsigned ArgNo = CB->getArgOperandNo(U); 3610 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 3611 // As long as we only use known information there is no need to track 3612 // dependences here. 3613 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, 3614 /* TrackDependence */ false); 3615 MA = MaybeAlign(AlignAA.getKnownAlign()); 3616 } 3617 3618 const DataLayout &DL = A.getDataLayout(); 3619 const Value *UseV = U->get(); 3620 if (auto *SI = dyn_cast<StoreInst>(I)) { 3621 if (SI->getPointerOperand() == UseV) 3622 MA = SI->getAlign(); 3623 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 3624 if (LI->getPointerOperand() == UseV) 3625 MA = LI->getAlign(); 3626 } 3627 3628 if (!MA || *MA <= 1) 3629 return 0; 3630 3631 unsigned Alignment = MA->value(); 3632 int64_t Offset; 3633 3634 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 3635 if (Base == &AssociatedValue) { 3636 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 3637 // So we can say that the maximum power of two which is a divisor of 3638 // gcd(Offset, Alignment) is an alignment. 3639 3640 uint32_t gcd = 3641 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 3642 Alignment = llvm::PowerOf2Floor(gcd); 3643 } 3644 } 3645 3646 return Alignment; 3647 } 3648 3649 struct AAAlignImpl : AAAlign { 3650 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 3651 3652 /// See AbstractAttribute::initialize(...). 3653 void initialize(Attributor &A) override { 3654 SmallVector<Attribute, 4> Attrs; 3655 getAttrs({Attribute::Alignment}, Attrs); 3656 for (const Attribute &Attr : Attrs) 3657 takeKnownMaximum(Attr.getValueAsInt()); 3658 3659 Value &V = getAssociatedValue(); 3660 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 3661 // use of the function pointer. This was caused by D73131. We want to 3662 // avoid this for function pointers especially because we iterate 3663 // their uses and int2ptr is not handled. It is not a correctness 3664 // problem though! 3665 if (!V.getType()->getPointerElementType()->isFunctionTy()) 3666 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 3667 3668 if (getIRPosition().isFnInterfaceKind() && 3669 (!getAnchorScope() || 3670 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 3671 indicatePessimisticFixpoint(); 3672 return; 3673 } 3674 3675 if (Instruction *CtxI = getCtxI()) 3676 followUsesInMBEC(*this, A, getState(), *CtxI); 3677 } 3678 3679 /// See AbstractAttribute::manifest(...). 3680 ChangeStatus manifest(Attributor &A) override { 3681 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 3682 3683 // Check for users that allow alignment annotations. 
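    // Illustrative effect (hypothetical IR): if this position is deduced to be
    // align 16, a user such as
    //   store i32 0, i32* %p, align 4
    // is rewritten below to carry `align 16`; loads are handled the same way.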
3684 Value &AssociatedValue = getAssociatedValue(); 3685 for (const Use &U : AssociatedValue.uses()) { 3686 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 3687 if (SI->getPointerOperand() == &AssociatedValue) 3688 if (SI->getAlignment() < getAssumedAlign()) { 3689 STATS_DECLTRACK(AAAlign, Store, 3690 "Number of times alignment added to a store"); 3691 SI->setAlignment(Align(getAssumedAlign())); 3692 LoadStoreChanged = ChangeStatus::CHANGED; 3693 } 3694 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 3695 if (LI->getPointerOperand() == &AssociatedValue) 3696 if (LI->getAlignment() < getAssumedAlign()) { 3697 LI->setAlignment(Align(getAssumedAlign())); 3698 STATS_DECLTRACK(AAAlign, Load, 3699 "Number of times alignment added to a load"); 3700 LoadStoreChanged = ChangeStatus::CHANGED; 3701 } 3702 } 3703 } 3704 3705 ChangeStatus Changed = AAAlign::manifest(A); 3706 3707 Align InheritAlign = 3708 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3709 if (InheritAlign >= getAssumedAlign()) 3710 return LoadStoreChanged; 3711 return Changed | LoadStoreChanged; 3712 } 3713 3714 // TODO: Provide a helper to determine the implied ABI alignment and check in 3715 // the existing manifest method and a new one for AAAlignImpl that value 3716 // to avoid making the alignment explicit if it did not improve. 3717 3718 /// See AbstractAttribute::getDeducedAttributes 3719 virtual void 3720 getDeducedAttributes(LLVMContext &Ctx, 3721 SmallVectorImpl<Attribute> &Attrs) const override { 3722 if (getAssumedAlign() > 1) 3723 Attrs.emplace_back( 3724 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 3725 } 3726 3727 /// See followUsesInMBEC 3728 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3729 AAAlign::StateType &State) { 3730 bool TrackUse = false; 3731 3732 unsigned int KnownAlign = 3733 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 3734 State.takeKnownMaximum(KnownAlign); 3735 3736 return TrackUse; 3737 } 3738 3739 /// See AbstractAttribute::getAsStr(). 3740 const std::string getAsStr() const override { 3741 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 3742 "-" + std::to_string(getAssumedAlign()) + ">") 3743 : "unknown-align"; 3744 } 3745 }; 3746 3747 /// Align attribute for a floating value. 3748 struct AAAlignFloating : AAAlignImpl { 3749 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 3750 3751 /// See AbstractAttribute::updateImpl(...). 3752 ChangeStatus updateImpl(Attributor &A) override { 3753 const DataLayout &DL = A.getDataLayout(); 3754 3755 auto VisitValueCB = [&](Value &V, const Instruction *, 3756 AAAlign::StateType &T, bool Stripped) -> bool { 3757 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); 3758 if (!Stripped && this == &AA) { 3759 // Use only IR information if we did not strip anything. 3760 Align PA = V.getPointerAlignment(DL); 3761 T.takeKnownMaximum(PA.value()); 3762 T.indicatePessimisticFixpoint(); 3763 } else { 3764 // Use abstract attribute information. 3765 const AAAlign::StateType &DS = 3766 static_cast<const AAAlign::StateType &>(AA.getState()); 3767 T ^= DS; 3768 } 3769 return T.isValidState(); 3770 }; 3771 3772 StateType T; 3773 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, 3774 VisitValueCB, getCtxI())) 3775 return indicatePessimisticFixpoint(); 3776 3777 // TODO: If we know we visited all incoming values, thus no are assumed 3778 // dead, we can take the known information from the state T. 
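    // Sketch (illustrative numbers): if our assumed alignment was 32 but the
    // traversal result T only supports 16, the clamp below lowers the assumed
    // value to 16 and reports CHANGED; if T does not restrict anything,
    // UNCHANGED is returned.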
3779 return clampStateAndIndicateChange(getState(), T); 3780 } 3781 3782 /// See AbstractAttribute::trackStatistics() 3783 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 3784 }; 3785 3786 /// Align attribute for function return value. 3787 struct AAAlignReturned final 3788 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 3789 AAAlignReturned(const IRPosition &IRP, Attributor &A) 3790 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {} 3791 3792 /// See AbstractAttribute::trackStatistics() 3793 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 3794 }; 3795 3796 /// Align attribute for function argument. 3797 struct AAAlignArgument final 3798 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 3799 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 3800 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3801 3802 /// See AbstractAttribute::manifest(...). 3803 ChangeStatus manifest(Attributor &A) override { 3804 // If the associated argument is involved in a must-tail call we give up 3805 // because we would need to keep the argument alignments of caller and 3806 // callee in-sync. Just does not seem worth the trouble right now. 3807 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 3808 return ChangeStatus::UNCHANGED; 3809 return Base::manifest(A); 3810 } 3811 3812 /// See AbstractAttribute::trackStatistics() 3813 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 3814 }; 3815 3816 struct AAAlignCallSiteArgument final : AAAlignFloating { 3817 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 3818 : AAAlignFloating(IRP, A) {} 3819 3820 /// See AbstractAttribute::manifest(...). 3821 ChangeStatus manifest(Attributor &A) override { 3822 // If the associated argument is involved in a must-tail call we give up 3823 // because we would need to keep the argument alignments of caller and 3824 // callee in-sync. Just does not seem worth the trouble right now. 3825 if (Argument *Arg = getAssociatedArgument()) 3826 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 3827 return ChangeStatus::UNCHANGED; 3828 ChangeStatus Changed = AAAlignImpl::manifest(A); 3829 Align InheritAlign = 3830 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3831 if (InheritAlign >= getAssumedAlign()) 3832 Changed = ChangeStatus::UNCHANGED; 3833 return Changed; 3834 } 3835 3836 /// See AbstractAttribute::updateImpl(Attributor &A). 3837 ChangeStatus updateImpl(Attributor &A) override { 3838 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 3839 if (Argument *Arg = getAssociatedArgument()) { 3840 // We only take known information from the argument 3841 // so we do not need to track a dependence. 3842 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 3843 *this, IRPosition::argument(*Arg), /* TrackDependence */ false); 3844 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 3845 } 3846 return Changed; 3847 } 3848 3849 /// See AbstractAttribute::trackStatistics() 3850 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 3851 }; 3852 3853 /// Align attribute deduction for a call site return value. 3854 struct AAAlignCallSiteReturned final 3855 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 3856 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 3857 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 3858 : Base(IRP, A) {} 3859 3860 /// See AbstractAttribute::initialize(...). 
3861 void initialize(Attributor &A) override { 3862 Base::initialize(A); 3863 Function *F = getAssociatedFunction(); 3864 if (!F) 3865 indicatePessimisticFixpoint(); 3866 } 3867 3868 /// See AbstractAttribute::trackStatistics() 3869 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 3870 }; 3871 3872 /// ------------------ Function No-Return Attribute ---------------------------- 3873 struct AANoReturnImpl : public AANoReturn { 3874 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 3875 3876 /// See AbstractAttribute::initialize(...). 3877 void initialize(Attributor &A) override { 3878 AANoReturn::initialize(A); 3879 Function *F = getAssociatedFunction(); 3880 if (!F) 3881 indicatePessimisticFixpoint(); 3882 } 3883 3884 /// See AbstractAttribute::getAsStr(). 3885 const std::string getAsStr() const override { 3886 return getAssumed() ? "noreturn" : "may-return"; 3887 } 3888 3889 /// See AbstractAttribute::updateImpl(Attributor &A). 3890 virtual ChangeStatus updateImpl(Attributor &A) override { 3891 auto CheckForNoReturn = [](Instruction &) { return false; }; 3892 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 3893 {(unsigned)Instruction::Ret})) 3894 return indicatePessimisticFixpoint(); 3895 return ChangeStatus::UNCHANGED; 3896 } 3897 }; 3898 3899 struct AANoReturnFunction final : AANoReturnImpl { 3900 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 3901 : AANoReturnImpl(IRP, A) {} 3902 3903 /// See AbstractAttribute::trackStatistics() 3904 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 3905 }; 3906 3907 /// NoReturn attribute deduction for a call sites. 3908 struct AANoReturnCallSite final : AANoReturnImpl { 3909 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 3910 : AANoReturnImpl(IRP, A) {} 3911 3912 /// See AbstractAttribute::updateImpl(...). 3913 ChangeStatus updateImpl(Attributor &A) override { 3914 // TODO: Once we have call site specific value information we can provide 3915 // call site specific liveness information and then it makes 3916 // sense to specialize attributes for call sites arguments instead of 3917 // redirecting requests to the callee argument. 3918 Function *F = getAssociatedFunction(); 3919 const IRPosition &FnPos = IRPosition::function(*F); 3920 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos); 3921 return clampStateAndIndicateChange( 3922 getState(), 3923 static_cast<const AANoReturn::StateType &>(FnAA.getState())); 3924 } 3925 3926 /// See AbstractAttribute::trackStatistics() 3927 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } 3928 }; 3929 3930 /// ----------------------- Variable Capturing --------------------------------- 3931 3932 /// A class to hold the state of for no-capture attributes. 3933 struct AANoCaptureImpl : public AANoCapture { 3934 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} 3935 3936 /// See AbstractAttribute::initialize(...). 3937 void initialize(Attributor &A) override { 3938 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) { 3939 indicateOptimisticFixpoint(); 3940 return; 3941 } 3942 Function *AnchorScope = getAnchorScope(); 3943 if (isFnInterfaceKind() && 3944 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) { 3945 indicatePessimisticFixpoint(); 3946 return; 3947 } 3948 3949 // You cannot "capture" null in the default address space. 
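    // E.g., a call site argument that is the `null` constant in address space
    // 0 (illustrative case) carries no pointer value that could leak, so
    // no-capture is known immediately.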
3950 if (isa<ConstantPointerNull>(getAssociatedValue()) && 3951 getAssociatedValue().getType()->getPointerAddressSpace() == 0) { 3952 indicateOptimisticFixpoint(); 3953 return; 3954 } 3955 3956 const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope; 3957 3958 // Check what state the associated function can actually capture. 3959 if (F) 3960 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 3961 else 3962 indicatePessimisticFixpoint(); 3963 } 3964 3965 /// See AbstractAttribute::updateImpl(...). 3966 ChangeStatus updateImpl(Attributor &A) override; 3967 3968 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...). 3969 virtual void 3970 getDeducedAttributes(LLVMContext &Ctx, 3971 SmallVectorImpl<Attribute> &Attrs) const override { 3972 if (!isAssumedNoCaptureMaybeReturned()) 3973 return; 3974 3975 if (getArgNo() >= 0) { 3976 if (isAssumedNoCapture()) 3977 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture)); 3978 else if (ManifestInternal) 3979 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned")); 3980 } 3981 } 3982 3983 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known 3984 /// depending on the ability of the function associated with \p IRP to capture 3985 /// state in memory and through "returning/throwing", respectively. 3986 static void determineFunctionCaptureCapabilities(const IRPosition &IRP, 3987 const Function &F, 3988 BitIntegerState &State) { 3989 // TODO: Once we have memory behavior attributes we should use them here. 3990 3991 // If we know we cannot communicate or write to memory, we do not care about 3992 // ptr2int anymore. 3993 if (F.onlyReadsMemory() && F.doesNotThrow() && 3994 F.getReturnType()->isVoidTy()) { 3995 State.addKnownBits(NO_CAPTURE); 3996 return; 3997 } 3998 3999 // A function cannot capture state in memory if it only reads memory, it can 4000 // however return/throw state and the state might be influenced by the 4001 // pointer value, e.g., loading from a returned pointer might reveal a bit. 4002 if (F.onlyReadsMemory()) 4003 State.addKnownBits(NOT_CAPTURED_IN_MEM); 4004 4005 // A function cannot communicate state back if it does not through 4006 // exceptions and doesn not return values. 4007 if (F.doesNotThrow() && F.getReturnType()->isVoidTy()) 4008 State.addKnownBits(NOT_CAPTURED_IN_RET); 4009 4010 // Check existing "returned" attributes. 4011 int ArgNo = IRP.getArgNo(); 4012 if (F.doesNotThrow() && ArgNo >= 0) { 4013 for (unsigned u = 0, e = F.arg_size(); u < e; ++u) 4014 if (F.hasParamAttribute(u, Attribute::Returned)) { 4015 if (u == unsigned(ArgNo)) 4016 State.removeAssumedBits(NOT_CAPTURED_IN_RET); 4017 else if (F.onlyReadsMemory()) 4018 State.addKnownBits(NO_CAPTURE); 4019 else 4020 State.addKnownBits(NOT_CAPTURED_IN_RET); 4021 break; 4022 } 4023 } 4024 } 4025 4026 /// See AbstractState::getAsStr(). 4027 const std::string getAsStr() const override { 4028 if (isKnownNoCapture()) 4029 return "known not-captured"; 4030 if (isAssumedNoCapture()) 4031 return "assumed not-captured"; 4032 if (isKnownNoCaptureMaybeReturned()) 4033 return "known not-captured-maybe-returned"; 4034 if (isAssumedNoCaptureMaybeReturned()) 4035 return "assumed not-captured-maybe-returned"; 4036 return "assumed-captured"; 4037 } 4038 }; 4039 4040 /// Attributor-aware capture tracker. 4041 struct AACaptureUseTracker final : public CaptureTracker { 4042 4043 /// Create a capture tracker that can lookup in-flight abstract attributes 4044 /// through the Attributor \p A. 
4045 /// 4046 /// If a use leads to a potential capture, \p CapturedInMemory is set and the 4047 /// search is stopped. If a use leads to a return instruction, 4048 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed. 4049 /// If a use leads to a ptr2int which may capture the value, 4050 /// \p CapturedInInteger is set. If a use is found that is currently assumed 4051 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies 4052 /// set. All values in \p PotentialCopies are later tracked as well. For every 4053 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0, 4054 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger 4055 /// conservatively set to true. 4056 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA, 4057 const AAIsDead &IsDeadAA, AANoCapture::StateType &State, 4058 SmallVectorImpl<const Value *> &PotentialCopies, 4059 unsigned &RemainingUsesToExplore) 4060 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State), 4061 PotentialCopies(PotentialCopies), 4062 RemainingUsesToExplore(RemainingUsesToExplore) {} 4063 4064 /// Determine if \p V maybe captured. *Also updates the state!* 4065 bool valueMayBeCaptured(const Value *V) { 4066 if (V->getType()->isPointerTy()) { 4067 PointerMayBeCaptured(V, this); 4068 } else { 4069 State.indicatePessimisticFixpoint(); 4070 } 4071 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4072 } 4073 4074 /// See CaptureTracker::tooManyUses(). 4075 void tooManyUses() override { 4076 State.removeAssumedBits(AANoCapture::NO_CAPTURE); 4077 } 4078 4079 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override { 4080 if (CaptureTracker::isDereferenceableOrNull(O, DL)) 4081 return true; 4082 const auto &DerefAA = A.getAAFor<AADereferenceable>( 4083 NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true, 4084 DepClassTy::OPTIONAL); 4085 return DerefAA.getAssumedDereferenceableBytes(); 4086 } 4087 4088 /// See CaptureTracker::captured(...). 4089 bool captured(const Use *U) override { 4090 Instruction *UInst = cast<Instruction>(U->getUser()); 4091 LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst 4092 << "\n"); 4093 4094 // Because we may reuse the tracker multiple times we keep track of the 4095 // number of explored uses ourselves as well. 4096 if (RemainingUsesToExplore-- == 0) { 4097 LLVM_DEBUG(dbgs() << " - too many uses to explore!\n"); 4098 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4099 /* Return */ true); 4100 } 4101 4102 // Deal with ptr2int by following uses. 4103 if (isa<PtrToIntInst>(UInst)) { 4104 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n"); 4105 return valueMayBeCaptured(UInst); 4106 } 4107 4108 // Explicitly catch return instructions. 4109 if (isa<ReturnInst>(UInst)) 4110 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4111 /* Return */ true); 4112 4113 // For now we only use special logic for call sites. However, the tracker 4114 // itself knows about a lot of other non-capturing cases already. 4115 auto *CB = dyn_cast<CallBase>(UInst); 4116 if (!CB || !CB->isArgOperand(U)) 4117 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4118 /* Return */ true); 4119 4120 unsigned ArgNo = CB->getArgOperandNo(U); 4121 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); 4122 // If we have a abstract no-capture attribute for the argument we can use 4123 // it to justify a non-capture attribute here. This allows recursion! 
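    // Example of the recursion this enables (illustrative IR):
    //   define void @rec(i8* %p) {
    //     call void @rec(i8* %p)
    //     ret void
    //   }
    // The use of %p in the recursive call is justified by the assumed
    // no-capture state of @rec's own argument, so %p can still end up
    // nocapture.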
4124 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); 4125 if (ArgNoCaptureAA.isAssumedNoCapture()) 4126 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4127 /* Return */ false); 4128 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4129 addPotentialCopy(*CB); 4130 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4131 /* Return */ false); 4132 } 4133 4134 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4135 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4136 /* Return */ true); 4137 } 4138 4139 /// Register \p CS as potential copy of the value we are checking. 4140 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } 4141 4142 /// See CaptureTracker::shouldExplore(...). 4143 bool shouldExplore(const Use *U) override { 4144 // Check liveness and ignore droppable users. 4145 return !U->getUser()->isDroppable() && 4146 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); 4147 } 4148 4149 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4150 /// \p CapturedInRet, then return the appropriate value for use in the 4151 /// CaptureTracker::captured() interface. 4152 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4153 bool CapturedInRet) { 4154 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4155 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4156 if (CapturedInMem) 4157 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4158 if (CapturedInInt) 4159 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4160 if (CapturedInRet) 4161 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4162 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4163 } 4164 4165 private: 4166 /// The attributor providing in-flight abstract attributes. 4167 Attributor &A; 4168 4169 /// The abstract attribute currently updated. 4170 AANoCapture &NoCaptureAA; 4171 4172 /// The abstract liveness state. 4173 const AAIsDead &IsDeadAA; 4174 4175 /// The state currently updated. 4176 AANoCapture::StateType &State; 4177 4178 /// Set of potential copies of the tracked value. 4179 SmallVectorImpl<const Value *> &PotentialCopies; 4180 4181 /// Global counter to limit the number of explored uses. 4182 unsigned &RemainingUsesToExplore; 4183 }; 4184 4185 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4186 const IRPosition &IRP = getIRPosition(); 4187 const Value *V = 4188 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue(); 4189 if (!V) 4190 return indicatePessimisticFixpoint(); 4191 4192 const Function *F = 4193 getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4194 assert(F && "Expected a function!"); 4195 const IRPosition &FnPos = IRPosition::function(*F); 4196 const auto &IsDeadAA = 4197 A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); 4198 4199 AANoCapture::StateType T; 4200 4201 // Readonly means we cannot capture through memory. 4202 const auto &FnMemAA = 4203 A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); 4204 if (FnMemAA.isAssumedReadOnly()) { 4205 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4206 if (FnMemAA.isKnownReadOnly()) 4207 addKnownBits(NOT_CAPTURED_IN_MEM); 4208 else 4209 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4210 } 4211 4212 // Make sure all returned values are different than the underlying value. 
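  // Illustrative counterexample: in `i8* @id(i8* %p) { ret i8* %p }` the
  // argument escapes through the return value, so NOT_CAPTURED_IN_RET must not
  // be concluded from this check for %p.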
4213 // TODO: we could do this in a more sophisticated way inside 4214 // AAReturnedValues, e.g., track all values that escape through returns 4215 // directly somehow. 4216 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4217 bool SeenConstant = false; 4218 for (auto &It : RVAA.returned_values()) { 4219 if (isa<Constant>(It.first)) { 4220 if (SeenConstant) 4221 return false; 4222 SeenConstant = true; 4223 } else if (!isa<Argument>(It.first) || 4224 It.first == getAssociatedArgument()) 4225 return false; 4226 } 4227 return true; 4228 }; 4229 4230 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 4231 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 4232 if (NoUnwindAA.isAssumedNoUnwind()) { 4233 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4234 const AAReturnedValues *RVAA = 4235 IsVoidTy ? nullptr 4236 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4237 /* TrackDependence */ true, 4238 DepClassTy::OPTIONAL); 4239 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4240 T.addKnownBits(NOT_CAPTURED_IN_RET); 4241 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4242 return ChangeStatus::UNCHANGED; 4243 if (NoUnwindAA.isKnownNoUnwind() && 4244 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4245 addKnownBits(NOT_CAPTURED_IN_RET); 4246 if (isKnown(NOT_CAPTURED_IN_MEM)) 4247 return indicateOptimisticFixpoint(); 4248 } 4249 } 4250 } 4251 4252 // Use the CaptureTracker interface and logic with the specialized tracker, 4253 // defined in AACaptureUseTracker, that can look at in-flight abstract 4254 // attributes and directly updates the assumed state. 4255 SmallVector<const Value *, 4> PotentialCopies; 4256 unsigned RemainingUsesToExplore = 4257 getDefaultMaxUsesToExploreForCaptureTracking(); 4258 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4259 RemainingUsesToExplore); 4260 4261 // Check all potential copies of the associated value until we can assume 4262 // none will be captured or we have to assume at least one might be. 4263 unsigned Idx = 0; 4264 PotentialCopies.push_back(V); 4265 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4266 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4267 4268 AANoCapture::StateType &S = getState(); 4269 auto Assumed = S.getAssumed(); 4270 S.intersectAssumedBits(T.getAssumed()); 4271 if (!isAssumedNoCaptureMaybeReturned()) 4272 return indicatePessimisticFixpoint(); 4273 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 4274 : ChangeStatus::CHANGED; 4275 } 4276 4277 /// NoCapture attribute for function arguments. 4278 struct AANoCaptureArgument final : AANoCaptureImpl { 4279 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 4280 : AANoCaptureImpl(IRP, A) {} 4281 4282 /// See AbstractAttribute::trackStatistics() 4283 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 4284 }; 4285 4286 /// NoCapture attribute for call site arguments. 4287 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 4288 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 4289 : AANoCaptureImpl(IRP, A) {} 4290 4291 /// See AbstractAttribute::initialize(...). 4292 void initialize(Attributor &A) override { 4293 if (Argument *Arg = getAssociatedArgument()) 4294 if (Arg->hasByValAttr()) 4295 indicateOptimisticFixpoint(); 4296 AANoCaptureImpl::initialize(A); 4297 } 4298 4299 /// See AbstractAttribute::updateImpl(...). 
4300 ChangeStatus updateImpl(Attributor &A) override { 4301 // TODO: Once we have call site specific value information we can provide 4302 // call site specific liveness information and then it makes 4303 // sense to specialize attributes for call sites arguments instead of 4304 // redirecting requests to the callee argument. 4305 Argument *Arg = getAssociatedArgument(); 4306 if (!Arg) 4307 return indicatePessimisticFixpoint(); 4308 const IRPosition &ArgPos = IRPosition::argument(*Arg); 4309 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); 4310 return clampStateAndIndicateChange( 4311 getState(), 4312 static_cast<const AANoCapture::StateType &>(ArgAA.getState())); 4313 } 4314 4315 /// See AbstractAttribute::trackStatistics() 4316 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 4317 }; 4318 4319 /// NoCapture attribute for floating values. 4320 struct AANoCaptureFloating final : AANoCaptureImpl { 4321 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 4322 : AANoCaptureImpl(IRP, A) {} 4323 4324 /// See AbstractAttribute::trackStatistics() 4325 void trackStatistics() const override { 4326 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 4327 } 4328 }; 4329 4330 /// NoCapture attribute for function return value. 4331 struct AANoCaptureReturned final : AANoCaptureImpl { 4332 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 4333 : AANoCaptureImpl(IRP, A) { 4334 llvm_unreachable("NoCapture is not applicable to function returns!"); 4335 } 4336 4337 /// See AbstractAttribute::initialize(...). 4338 void initialize(Attributor &A) override { 4339 llvm_unreachable("NoCapture is not applicable to function returns!"); 4340 } 4341 4342 /// See AbstractAttribute::updateImpl(...). 4343 ChangeStatus updateImpl(Attributor &A) override { 4344 llvm_unreachable("NoCapture is not applicable to function returns!"); 4345 } 4346 4347 /// See AbstractAttribute::trackStatistics() 4348 void trackStatistics() const override {} 4349 }; 4350 4351 /// NoCapture attribute deduction for a call site return value. 4352 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 4353 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 4354 : AANoCaptureImpl(IRP, A) {} 4355 4356 /// See AbstractAttribute::trackStatistics() 4357 void trackStatistics() const override { 4358 STATS_DECLTRACK_CSRET_ATTR(nocapture) 4359 } 4360 }; 4361 4362 /// ------------------ Value Simplify Attribute ---------------------------- 4363 struct AAValueSimplifyImpl : AAValueSimplify { 4364 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 4365 : AAValueSimplify(IRP, A) {} 4366 4367 /// See AbstractAttribute::initialize(...). 4368 void initialize(Attributor &A) override { 4369 if (getAssociatedValue().getType()->isVoidTy()) 4370 indicatePessimisticFixpoint(); 4371 } 4372 4373 /// See AbstractAttribute::getAsStr(). 4374 const std::string getAsStr() const override { 4375 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") 4376 : "not-simple"; 4377 } 4378 4379 /// See AbstractAttribute::trackStatistics() 4380 void trackStatistics() const override {} 4381 4382 /// See AAValueSimplify::getAssumedSimplifiedValue() 4383 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 4384 if (!getAssumed()) 4385 return const_cast<Value *>(&getAssociatedValue()); 4386 return SimplifiedAssociatedValue; 4387 } 4388 4389 /// Helper function for querying AAValueSimplify and updating candicate. 
4390 /// \param QueryingValue Value trying to unify with SimplifiedValue 4391 /// \param AccumulatedSimplifiedValue Current simplification result. 4392 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 4393 Value &QueryingValue, 4394 Optional<Value *> &AccumulatedSimplifiedValue) { 4395 // FIXME: Add a typecast support. 4396 4397 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( 4398 QueryingAA, IRPosition::value(QueryingValue)); 4399 4400 Optional<Value *> QueryingValueSimplified = 4401 ValueSimplifyAA.getAssumedSimplifiedValue(A); 4402 4403 if (!QueryingValueSimplified.hasValue()) 4404 return true; 4405 4406 if (!QueryingValueSimplified.getValue()) 4407 return false; 4408 4409 Value &QueryingValueSimplifiedUnwrapped = 4410 *QueryingValueSimplified.getValue(); 4411 4412 if (AccumulatedSimplifiedValue.hasValue() && 4413 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && 4414 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4415 return AccumulatedSimplifiedValue == QueryingValueSimplified; 4416 if (AccumulatedSimplifiedValue.hasValue() && 4417 isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4418 return true; 4419 4420 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue 4421 << " is assumed to be " 4422 << QueryingValueSimplifiedUnwrapped << "\n"); 4423 4424 AccumulatedSimplifiedValue = QueryingValueSimplified; 4425 return true; 4426 } 4427 4428 bool askSimplifiedValueForAAValueConstantRange(Attributor &A) { 4429 if (!getAssociatedValue().getType()->isIntegerTy()) 4430 return false; 4431 4432 const auto &ValueConstantRangeAA = 4433 A.getAAFor<AAValueConstantRange>(*this, getIRPosition()); 4434 4435 Optional<ConstantInt *> COpt = 4436 ValueConstantRangeAA.getAssumedConstantInt(A); 4437 if (COpt.hasValue()) { 4438 if (auto *C = COpt.getValue()) 4439 SimplifiedAssociatedValue = C; 4440 else 4441 return false; 4442 } else { 4443 SimplifiedAssociatedValue = llvm::None; 4444 } 4445 return true; 4446 } 4447 4448 /// See AbstractAttribute::manifest(...). 4449 ChangeStatus manifest(Attributor &A) override { 4450 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4451 4452 if (SimplifiedAssociatedValue.hasValue() && 4453 !SimplifiedAssociatedValue.getValue()) 4454 return Changed; 4455 4456 Value &V = getAssociatedValue(); 4457 auto *C = SimplifiedAssociatedValue.hasValue() 4458 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4459 : UndefValue::get(V.getType()); 4460 if (C) { 4461 // We can replace the AssociatedValue with the constant. 4462 if (!V.user_empty() && &V != C && V.getType() == C->getType()) { 4463 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C 4464 << " :: " << *this << "\n"); 4465 if (A.changeValueAfterManifest(V, *C)) 4466 Changed = ChangeStatus::CHANGED; 4467 } 4468 } 4469 4470 return Changed | AAValueSimplify::manifest(A); 4471 } 4472 4473 /// See AbstractState::indicatePessimisticFixpoint(...). 4474 ChangeStatus indicatePessimisticFixpoint() override { 4475 // NOTE: Associated value will be returned in a pessimistic fixpoint and is 4476 // regarded as known. That's why`indicateOptimisticFixpoint` is called. 4477 SimplifiedAssociatedValue = &getAssociatedValue(); 4478 indicateOptimisticFixpoint(); 4479 return ChangeStatus::CHANGED; 4480 } 4481 4482 protected: 4483 // An assumed simplified value. Initially, it is set to Optional::None, which 4484 // means that the value is not clear under current assumption. 
If in the 4485 // pessimistic state, getAssumedSimplifiedValue doesn't return this value but 4486 // returns the original associated value. 4487 Optional<Value *> SimplifiedAssociatedValue; 4488 }; 4489 4490 struct AAValueSimplifyArgument final : AAValueSimplifyImpl { 4491 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) 4492 : AAValueSimplifyImpl(IRP, A) {} 4493 4494 void initialize(Attributor &A) override { 4495 AAValueSimplifyImpl::initialize(A); 4496 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) 4497 indicatePessimisticFixpoint(); 4498 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated, 4499 Attribute::StructRet, Attribute::Nest}, 4500 /* IgnoreSubsumingPositions */ true)) 4501 indicatePessimisticFixpoint(); 4502 4503 // FIXME: This is a hack to prevent us from propagating function pointers in 4504 // the new pass manager CGSCC pass as it creates call edges the 4505 // CallGraphUpdater cannot handle yet. 4506 Value &V = getAssociatedValue(); 4507 if (V.getType()->isPointerTy() && 4508 V.getType()->getPointerElementType()->isFunctionTy() && 4509 !A.isModulePass()) 4510 indicatePessimisticFixpoint(); 4511 } 4512 4513 /// See AbstractAttribute::updateImpl(...). 4514 ChangeStatus updateImpl(Attributor &A) override { 4515 // Byval is only replaceable if it is readonly, otherwise we would write into 4516 // the replaced value and not the copy that byval creates implicitly. 4517 Argument *Arg = getAssociatedArgument(); 4518 if (Arg->hasByValAttr()) { 4519 // TODO: We probably need to verify synchronization is not an issue, e.g., 4520 // there is no race by not copying a constant byval. 4521 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 4522 if (!MemAA.isAssumedReadOnly()) 4523 return indicatePessimisticFixpoint(); 4524 } 4525 4526 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4527 4528 auto PredForCallSite = [&](AbstractCallSite ACS) { 4529 const IRPosition &ACSArgPos = 4530 IRPosition::callsite_argument(ACS, getArgNo()); 4531 // Check if a corresponding argument was found or if it is not 4532 // associated (which can happen for callback calls). 4533 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 4534 return false; 4535 4536 // We can only propagate thread independent values through callbacks. 4537 // This is different to direct/indirect call sites because for them we 4538 // know the thread executing the caller and callee is the same. For 4539 // callbacks this is not guaranteed, thus a thread dependent value could 4540 // be different for the caller and callee, making it invalid to propagate. 4541 Value &ArgOp = ACSArgPos.getAssociatedValue(); 4542 if (ACS.isCallbackCall()) 4543 if (auto *C = dyn_cast<Constant>(&ArgOp)) 4544 if (C->isThreadDependent()) 4545 return false; 4546 return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue); 4547 }; 4548 4549 bool AllCallSitesKnown; 4550 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 4551 AllCallSitesKnown)) 4552 if (!askSimplifiedValueForAAValueConstantRange(A)) 4553 return indicatePessimisticFixpoint(); 4554 4555 // If a candidate was found in this update, return CHANGED. 4556 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4557 ?
ChangeStatus::UNCHANGED 4558 : ChangeStatus ::CHANGED; 4559 } 4560 4561 /// See AbstractAttribute::trackStatistics() 4562 void trackStatistics() const override { 4563 STATS_DECLTRACK_ARG_ATTR(value_simplify) 4564 } 4565 }; 4566 4567 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 4568 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 4569 : AAValueSimplifyImpl(IRP, A) {} 4570 4571 /// See AbstractAttribute::updateImpl(...). 4572 ChangeStatus updateImpl(Attributor &A) override { 4573 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4574 4575 auto PredForReturned = [&](Value &V) { 4576 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4577 }; 4578 4579 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 4580 if (!askSimplifiedValueForAAValueConstantRange(A)) 4581 return indicatePessimisticFixpoint(); 4582 4583 // If a candicate was found in this update, return CHANGED. 4584 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4585 ? ChangeStatus::UNCHANGED 4586 : ChangeStatus ::CHANGED; 4587 } 4588 4589 ChangeStatus manifest(Attributor &A) override { 4590 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4591 4592 if (SimplifiedAssociatedValue.hasValue() && 4593 !SimplifiedAssociatedValue.getValue()) 4594 return Changed; 4595 4596 Value &V = getAssociatedValue(); 4597 auto *C = SimplifiedAssociatedValue.hasValue() 4598 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4599 : UndefValue::get(V.getType()); 4600 if (C) { 4601 auto PredForReturned = 4602 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 4603 // We can replace the AssociatedValue with the constant. 4604 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) 4605 return true; 4606 4607 for (ReturnInst *RI : RetInsts) { 4608 if (RI->getFunction() != getAnchorScope()) 4609 continue; 4610 auto *RC = C; 4611 if (RC->getType() != RI->getReturnValue()->getType()) 4612 RC = ConstantExpr::getBitCast(RC, 4613 RI->getReturnValue()->getType()); 4614 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC 4615 << " in " << *RI << " :: " << *this << "\n"); 4616 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) 4617 Changed = ChangeStatus::CHANGED; 4618 } 4619 return true; 4620 }; 4621 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 4622 } 4623 4624 return Changed | AAValueSimplify::manifest(A); 4625 } 4626 4627 /// See AbstractAttribute::trackStatistics() 4628 void trackStatistics() const override { 4629 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 4630 } 4631 }; 4632 4633 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 4634 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 4635 : AAValueSimplifyImpl(IRP, A) {} 4636 4637 /// See AbstractAttribute::initialize(...). 4638 void initialize(Attributor &A) override { 4639 // FIXME: This might have exposed a SCC iterator update bug in the old PM. 4640 // Needs investigation. 4641 // AAValueSimplifyImpl::initialize(A); 4642 Value &V = getAnchorValue(); 4643 4644 // TODO: add other stuffs 4645 if (isa<Constant>(V)) 4646 indicatePessimisticFixpoint(); 4647 } 4648 4649 /// See AbstractAttribute::updateImpl(...). 
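// A floating value is simplified by traversing the values it is built from
// (genericValueTraversal) and unifying their assumed simplified values via
// checkAndUpdate; if the traversal fails, AAValueConstantRange is queried as
// a fallback for integer values.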
4650 ChangeStatus updateImpl(Attributor &A) override { 4651 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4652 4653 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, 4654 bool Stripped) -> bool { 4655 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V)); 4656 if (!Stripped && this == &AA) { 4657 // TODO: Look at the instruction and check recursively. 4658 4659 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V 4660 << "\n"); 4661 return false; 4662 } 4663 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4664 }; 4665 4666 bool Dummy = false; 4667 if (!genericValueTraversal<AAValueSimplify, bool>( 4668 A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(), 4669 /* UseValueSimplify */ false)) 4670 if (!askSimplifiedValueForAAValueConstantRange(A)) 4671 return indicatePessimisticFixpoint(); 4672 4673 // If a candidate was found in this update, return CHANGED. 4674 4675 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4676 ? ChangeStatus::UNCHANGED 4677 : ChangeStatus::CHANGED; 4678 } 4679 4680 /// See AbstractAttribute::trackStatistics() 4681 void trackStatistics() const override { 4682 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 4683 } 4684 }; 4685 4686 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 4687 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 4688 : AAValueSimplifyImpl(IRP, A) {} 4689 4690 /// See AbstractAttribute::initialize(...). 4691 void initialize(Attributor &A) override { 4692 SimplifiedAssociatedValue = &getAnchorValue(); 4693 indicateOptimisticFixpoint(); 4694 } 4695 /// See AbstractAttribute::updateImpl(...). 4696 ChangeStatus updateImpl(Attributor &A) override { 4697 llvm_unreachable( 4698 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 4699 } 4700 /// See AbstractAttribute::trackStatistics() 4701 void trackStatistics() const override { 4702 STATS_DECLTRACK_FN_ATTR(value_simplify) 4703 } 4704 }; 4705 4706 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 4707 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 4708 : AAValueSimplifyFunction(IRP, A) {} 4709 /// See AbstractAttribute::trackStatistics() 4710 void trackStatistics() const override { 4711 STATS_DECLTRACK_CS_ATTR(value_simplify) 4712 } 4713 }; 4714 4715 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned { 4716 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 4717 : AAValueSimplifyReturned(IRP, A) {} 4718 4719 /// See AbstractAttribute::manifest(...). 4720 ChangeStatus manifest(Attributor &A) override { 4721 return AAValueSimplifyImpl::manifest(A); 4722 } 4723 4724 void trackStatistics() const override { 4725 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 4726 } 4727 }; 4728 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 4729 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 4730 : AAValueSimplifyFloating(IRP, A) {} 4731 4732 /// See AbstractAttribute::manifest(...). 4733 ChangeStatus manifest(Attributor &A) override { 4734 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4735 4736 if (SimplifiedAssociatedValue.hasValue() && 4737 !SimplifiedAssociatedValue.getValue()) 4738 return Changed; 4739 4740 Value &V = getAssociatedValue(); 4741 auto *C = SimplifiedAssociatedValue.hasValue() 4742 ?
dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4743 : UndefValue::get(V.getType()); 4744 if (C) { 4745 Use &U = cast<CallBase>(&getAnchorValue())->getArgOperandUse(getArgNo()); 4746 // We can replace the AssociatedValue with the constant. 4747 if (&V != C && V.getType() == C->getType()) { 4748 if (A.changeUseAfterManifest(U, *C)) 4749 Changed = ChangeStatus::CHANGED; 4750 } 4751 } 4752 4753 return Changed | AAValueSimplify::manifest(A); 4754 } 4755 4756 void trackStatistics() const override { 4757 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 4758 } 4759 }; 4760 4761 /// ----------------------- Heap-To-Stack Conversion --------------------------- 4762 struct AAHeapToStackImpl : public AAHeapToStack { 4763 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) 4764 : AAHeapToStack(IRP, A) {} 4765 4766 const std::string getAsStr() const override { 4767 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); 4768 } 4769 4770 ChangeStatus manifest(Attributor &A) override { 4771 assert(getState().isValidState() && 4772 "Attempted to manifest an invalid state!"); 4773 4774 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 4775 Function *F = getAnchorScope(); 4776 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4777 4778 for (Instruction *MallocCall : MallocCalls) { 4779 // This malloc cannot be replaced. 4780 if (BadMallocCalls.count(MallocCall)) 4781 continue; 4782 4783 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { 4784 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 4785 A.deleteAfterManifest(*FreeCall); 4786 HasChanged = ChangeStatus::CHANGED; 4787 } 4788 4789 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall 4790 << "\n"); 4791 4792 Align Alignment; 4793 Constant *Size; 4794 if (isCallocLikeFn(MallocCall, TLI)) { 4795 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); 4796 auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); 4797 APInt TotalSize = SizeT->getValue() * Num->getValue(); 4798 Size = 4799 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); 4800 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { 4801 Size = cast<ConstantInt>(MallocCall->getOperand(1)); 4802 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) 4803 ->getValue() 4804 .getZExtValue()) 4805 .valueOrOne(); 4806 } else { 4807 Size = cast<ConstantInt>(MallocCall->getOperand(0)); 4808 } 4809 4810 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); 4811 Instruction *AI = 4812 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 4813 "", MallocCall->getNextNode()); 4814 4815 if (AI->getType() != MallocCall->getType()) 4816 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", 4817 AI->getNextNode()); 4818 4819 A.changeValueAfterManifest(*MallocCall, *AI); 4820 4821 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { 4822 auto *NBB = II->getNormalDest(); 4823 BranchInst::Create(NBB, MallocCall->getParent()); 4824 A.deleteAfterManifest(*MallocCall); 4825 } else { 4826 A.deleteAfterManifest(*MallocCall); 4827 } 4828 4829 // Zero out the allocated memory if it was a calloc. 
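// This is done by emitting a call to the llvm.memset intrinsic on the new
// alloca with a zero byte value and the computed allocation size.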
4830 if (isCallocLikeFn(MallocCall, TLI)) { 4831 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc", 4832 AI->getNextNode()); 4833 Value *Ops[] = { 4834 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 4835 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 4836 4837 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()}; 4838 Module *M = F->getParent(); 4839 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 4840 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 4841 } 4842 HasChanged = ChangeStatus::CHANGED; 4843 } 4844 4845 return HasChanged; 4846 } 4847 4848 /// Collection of all malloc calls in a function. 4849 SmallSetVector<Instruction *, 4> MallocCalls; 4850 4851 /// Collection of malloc calls that cannot be converted. 4852 DenseSet<const Instruction *> BadMallocCalls; 4853 4854 /// A map for each malloc call to the set of associated free calls. 4855 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc; 4856 4857 ChangeStatus updateImpl(Attributor &A) override; 4858 }; 4859 4860 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) { 4861 const Function *F = getAnchorScope(); 4862 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4863 4864 MustBeExecutedContextExplorer &Explorer = 4865 A.getInfoCache().getMustBeExecutedContextExplorer(); 4866 4867 auto FreeCheck = [&](Instruction &I) { 4868 const auto &Frees = FreesForMalloc.lookup(&I); 4869 if (Frees.size() != 1) 4870 return false; 4871 Instruction *UniqueFree = *Frees.begin(); 4872 return Explorer.findInContextOf(UniqueFree, I.getNextNode()); 4873 }; 4874 4875 auto UsesCheck = [&](Instruction &I) { 4876 bool ValidUsesOnly = true; 4877 bool MustUse = true; 4878 auto Pred = [&](const Use &U, bool &Follow) -> bool { 4879 Instruction *UserI = cast<Instruction>(U.getUser()); 4880 if (isa<LoadInst>(UserI)) 4881 return true; 4882 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 4883 if (SI->getValueOperand() == U.get()) { 4884 LLVM_DEBUG(dbgs() 4885 << "[H2S] escaping store to memory: " << *UserI << "\n"); 4886 ValidUsesOnly = false; 4887 } else { 4888 // A store into the malloc'ed memory is fine. 4889 } 4890 return true; 4891 } 4892 if (auto *CB = dyn_cast<CallBase>(UserI)) { 4893 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 4894 return true; 4895 // Record malloc. 4896 if (isFreeCall(UserI, TLI)) { 4897 if (MustUse) { 4898 FreesForMalloc[&I].insert(UserI); 4899 } else { 4900 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: " 4901 << *UserI << "\n"); 4902 ValidUsesOnly = false; 4903 } 4904 return true; 4905 } 4906 4907 unsigned ArgNo = CB->getArgOperandNo(&U); 4908 4909 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 4910 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4911 4912 // If a callsite argument use is nofree, we are fine. 4913 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 4914 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4915 4916 if (!NoCaptureAA.isAssumedNoCapture() || 4917 !ArgNoFreeAA.isAssumedNoFree()) { 4918 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 4919 ValidUsesOnly = false; 4920 } 4921 return true; 4922 } 4923 4924 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 4925 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 4926 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI)); 4927 Follow = true; 4928 return true; 4929 } 4930 // Unknown user for which we can not track uses further (in a way that 4931 // makes sense). 
4932 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 4933 ValidUsesOnly = false; 4934 return true; 4935 }; 4936 A.checkForAllUses(Pred, *this, I); 4937 return ValidUsesOnly; 4938 }; 4939 4940 auto MallocCallocCheck = [&](Instruction &I) { 4941 if (BadMallocCalls.count(&I)) 4942 return true; 4943 4944 bool IsMalloc = isMallocLikeFn(&I, TLI); 4945 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); 4946 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); 4947 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { 4948 BadMallocCalls.insert(&I); 4949 return true; 4950 } 4951 4952 if (IsMalloc) { 4953 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) 4954 if (Size->getValue().ule(MaxHeapToStackSize)) 4955 if (UsesCheck(I) || FreeCheck(I)) { 4956 MallocCalls.insert(&I); 4957 return true; 4958 } 4959 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { 4960 // Only if the alignment and sizes are constant. 4961 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4962 if (Size->getValue().ule(MaxHeapToStackSize)) 4963 if (UsesCheck(I) || FreeCheck(I)) { 4964 MallocCalls.insert(&I); 4965 return true; 4966 } 4967 } else if (IsCalloc) { 4968 bool Overflow = false; 4969 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) 4970 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4971 if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) 4972 .ule(MaxHeapToStackSize)) 4973 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { 4974 MallocCalls.insert(&I); 4975 return true; 4976 } 4977 } 4978 4979 BadMallocCalls.insert(&I); 4980 return true; 4981 }; 4982 4983 size_t NumBadMallocs = BadMallocCalls.size(); 4984 4985 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); 4986 4987 if (NumBadMallocs != BadMallocCalls.size()) 4988 return ChangeStatus::CHANGED; 4989 4990 return ChangeStatus::UNCHANGED; 4991 } 4992 4993 struct AAHeapToStackFunction final : public AAHeapToStackImpl { 4994 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 4995 : AAHeapToStackImpl(IRP, A) {} 4996 4997 /// See AbstractAttribute::trackStatistics(). 4998 void trackStatistics() const override { 4999 STATS_DECL( 5000 MallocCalls, Function, 5001 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5002 for (auto *C : MallocCalls) 5003 if (!BadMallocCalls.count(C)) 5004 ++BUILD_STAT_NAME(MallocCalls, Function); 5005 } 5006 }; 5007 5008 /// ----------------------- Privatizable Pointers ------------------------------ 5009 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 5010 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 5011 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 5012 5013 ChangeStatus indicatePessimisticFixpoint() override { 5014 AAPrivatizablePtr::indicatePessimisticFixpoint(); 5015 PrivatizableType = nullptr; 5016 return ChangeStatus::CHANGED; 5017 } 5018 5019 /// Identify the type we can chose for a private copy of the underlying 5020 /// argument. None means it is not clear yet, nullptr means there is none. 5021 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 5022 5023 /// Return a privatizable type that encloses both T0 and T1. 5024 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
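// A result of None means no type has been seen yet, nullptr means the types
// are known to be incompatible, and otherwise the single agreed-upon type is
// returned.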
5025 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 5026 if (!T0.hasValue()) 5027 return T1; 5028 if (!T1.hasValue()) 5029 return T0; 5030 if (T0 == T1) 5031 return T0; 5032 return nullptr; 5033 } 5034 5035 Optional<Type *> getPrivatizableType() const override { 5036 return PrivatizableType; 5037 } 5038 5039 const std::string getAsStr() const override { 5040 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 5041 } 5042 5043 protected: 5044 Optional<Type *> PrivatizableType; 5045 }; 5046 5047 // TODO: Do this for call site arguments (probably also other values) as well. 5048 5049 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 5050 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 5051 : AAPrivatizablePtrImpl(IRP, A) {} 5052 5053 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5054 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5055 // If this is a byval argument and we know all the call sites (so we can 5056 // rewrite them), there is no need to check them explicitly. 5057 bool AllCallSitesKnown; 5058 if (getIRPosition().hasAttr(Attribute::ByVal) && 5059 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 5060 true, AllCallSitesKnown)) 5061 return getAssociatedValue().getType()->getPointerElementType(); 5062 5063 Optional<Type *> Ty; 5064 unsigned ArgNo = getIRPosition().getArgNo(); 5065 5066 // Make sure the associated call site argument has the same type at all call 5067 // sites and it is an allocation we know is safe to privatize; for now that 5068 // means we only allow alloca instructions. 5069 // TODO: We can additionally analyze the accesses in the callee to create 5070 // the type from that information instead. That is a little more 5071 // involved and will be done in a follow up patch. 5072 auto CallSiteCheck = [&](AbstractCallSite ACS) { 5073 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 5074 // Check if a corresponding argument was found or if it is one not 5075 // associated (which can happen for callback calls). 5076 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 5077 return false; 5078 5079 // Check that all call sites agree on a type. 5080 auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); 5081 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 5082 5083 LLVM_DEBUG({ 5084 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 5085 if (CSTy.hasValue() && CSTy.getValue()) 5086 CSTy.getValue()->print(dbgs()); 5087 else if (CSTy.hasValue()) 5088 dbgs() << "<nullptr>"; 5089 else 5090 dbgs() << "<none>"; 5091 }); 5092 5093 Ty = combineTypes(Ty, CSTy); 5094 5095 LLVM_DEBUG({ 5096 dbgs() << " : New Type: "; 5097 if (Ty.hasValue() && Ty.getValue()) 5098 Ty.getValue()->print(dbgs()); 5099 else if (Ty.hasValue()) 5100 dbgs() << "<nullptr>"; 5101 else 5102 dbgs() << "<none>"; 5103 dbgs() << "\n"; 5104 }); 5105 5106 return !Ty.hasValue() || Ty.getValue(); 5107 }; 5108 5109 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) 5110 return nullptr; 5111 return Ty; 5112 } 5113 5114 /// See AbstractAttribute::updateImpl(...).
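// The update identifies a privatizable type over all call sites, rejects
// types with padding, verifies the rewritten signature is ABI compatible,
// and finally checks every call site for a use (callback or direct call)
// that would require a different privatization.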
5115 ChangeStatus updateImpl(Attributor &A) override { 5116 PrivatizableType = identifyPrivatizableType(A); 5117 if (!PrivatizableType.hasValue()) 5118 return ChangeStatus::UNCHANGED; 5119 if (!PrivatizableType.getValue()) 5120 return indicatePessimisticFixpoint(); 5121 5122 // The dependence is optional so we don't give up once we give up on the 5123 // alignment. 5124 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 5125 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5126 5127 // Avoid arguments with padding for now. 5128 if (!getIRPosition().hasAttr(Attribute::ByVal) && 5129 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 5130 A.getInfoCache().getDL())) { 5131 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 5132 return indicatePessimisticFixpoint(); 5133 } 5134 5135 // Verify callee and caller agree on how the promoted argument would be 5136 // passed. 5137 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a 5138 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible 5139 // which doesn't require the arguments ArgumentPromotion wanted to pass. 5140 Function &Fn = *getIRPosition().getAnchorScope(); 5141 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; 5142 ArgsToPromote.insert(getAssociatedArgument()); 5143 const auto *TTI = 5144 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 5145 if (!TTI || 5146 !ArgumentPromotionPass::areFunctionArgsABICompatible( 5147 Fn, *TTI, ArgsToPromote, Dummy) || 5148 ArgsToPromote.empty()) { 5149 LLVM_DEBUG( 5150 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 5151 << Fn.getName() << "\n"); 5152 return indicatePessimisticFixpoint(); 5153 } 5154 5155 // Collect the types that will replace the privatizable type in the function 5156 // signature. 5157 SmallVector<Type *, 16> ReplacementTypes; 5158 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5159 5160 // Register a rewrite of the argument. 5161 Argument *Arg = getAssociatedArgument(); 5162 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 5163 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 5164 return indicatePessimisticFixpoint(); 5165 } 5166 5167 unsigned ArgNo = Arg->getArgNo(); 5168 5169 // Helper to check if for the given call site the associated argument is 5170 // passed to a callback where the privatization would be different. 
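// This is used for direct call sites: all callback uses of the call are
// enumerated via AbstractCallSite::getCallbackUses, and any callback argument
// that receives the same operand must agree on the privatizable type.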
5171 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { 5172 SmallVector<const Use *, 4> CallbackUses; 5173 AbstractCallSite::getCallbackUses(CB, CallbackUses); 5174 for (const Use *U : CallbackUses) { 5175 AbstractCallSite CBACS(U); 5176 assert(CBACS && CBACS.isCallbackCall()); 5177 for (Argument &CBArg : CBACS.getCalledFunction()->args()) { 5178 int CBArgNo = CBACS.getCallArgOperandNo(CBArg); 5179 5180 LLVM_DEBUG({ 5181 dbgs() 5182 << "[AAPrivatizablePtr] Argument " << *Arg 5183 << " check if it can be privatized in the context of its parent (" 5184 << Arg->getParent()->getName() 5185 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5186 "callback (" 5187 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5188 << ")\n[AAPrivatizablePtr] " << CBArg << " : " 5189 << CBACS.getCallArgOperand(CBArg) << " vs " 5190 << CB.getArgOperand(ArgNo) << "\n" 5191 << "[AAPrivatizablePtr] " << CBArg << " : " 5192 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; 5193 }); 5194 5195 if (CBArgNo != int(ArgNo)) 5196 continue; 5197 const auto &CBArgPrivAA = 5198 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg)); 5199 if (CBArgPrivAA.isValidState()) { 5200 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); 5201 if (!CBArgPrivTy.hasValue()) 5202 continue; 5203 if (CBArgPrivTy.getValue() == PrivatizableType) 5204 continue; 5205 } 5206 5207 LLVM_DEBUG({ 5208 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5209 << " cannot be privatized in the context of its parent (" 5210 << Arg->getParent()->getName() 5211 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5212 "callback (" 5213 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5214 << ").\n[AAPrivatizablePtr] for which the argument " 5215 "privatization is not compatible.\n"; 5216 }); 5217 return false; 5218 } 5219 } 5220 return true; 5221 }; 5222 5223 // Helper to check if for the given call site the associated argument is 5224 // passed to a direct call where the privatization would be different.
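// Conversely, this is used when the abstract call site is a callback call:
// the operand also feeds the underlying direct (broker) call, so the
// corresponding argument of the direct callee must agree on the privatizable
// type.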
5225 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { 5226 CallBase *DC = cast<CallBase>(ACS.getInstruction()); 5227 int DCArgNo = ACS.getCallArgOperandNo(ArgNo); 5228 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && 5229 "Expected a direct call operand for callback call operand"); 5230 5231 LLVM_DEBUG({ 5232 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5233 << " check if it can be privatized in the context of its parent (" 5234 << Arg->getParent()->getName() 5235 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5236 "direct call of (" 5237 << DCArgNo << "@" << DC->getCalledFunction()->getName() 5238 << ").\n"; 5239 }); 5240 5241 Function *DCCallee = DC->getCalledFunction(); 5242 if (unsigned(DCArgNo) < DCCallee->arg_size()) { 5243 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( 5244 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo))); 5245 if (DCArgPrivAA.isValidState()) { 5246 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); 5247 if (!DCArgPrivTy.hasValue()) 5248 return true; 5249 if (DCArgPrivTy.getValue() == PrivatizableType) 5250 return true; 5251 } 5252 } 5253 5254 LLVM_DEBUG({ 5255 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5256 << " cannot be privatized in the context of its parent (" 5257 << Arg->getParent()->getName() 5258 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5259 "direct call of (" 5260 << ACS.getInstruction()->getCalledFunction()->getName() 5261 << ").\n[AAPrivatizablePtr] for which the argument " 5262 "privatization is not compatible.\n"; 5263 }); 5264 return false; 5265 }; 5266 5267 // Helper to check if the associated argument is used at the given abstract 5268 // call site in a way that is incompatible with the privatization assumed 5269 // here. 5270 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { 5271 if (ACS.isDirectCall()) 5272 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); 5273 if (ACS.isCallbackCall()) 5274 return IsCompatiblePrivArgOfDirectCS(ACS); 5275 return false; 5276 }; 5277 5278 bool AllCallSitesKnown; 5279 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true, 5280 AllCallSitesKnown)) 5281 return indicatePessimisticFixpoint(); 5282 5283 return ChangeStatus::UNCHANGED; 5284 } 5285 5286 /// Given a type to privatize \p PrivType, collect the constituents (which are 5287 /// used) in \p ReplacementTypes. 5288 static void 5289 identifyReplacementTypes(Type *PrivType, 5290 SmallVectorImpl<Type *> &ReplacementTypes) { 5291 // TODO: For now we expand the privatization type to the fullest which can 5292 // lead to dead arguments that need to be removed later. 5293 assert(PrivType && "Expected privatizable type!"); 5294 5295 // Traverse the type, extract constituent types on the outermost level. 5296 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5297 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) 5298 ReplacementTypes.push_back(PrivStructType->getElementType(u)); 5299 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5300 ReplacementTypes.append(PrivArrayType->getNumElements(), 5301 PrivArrayType->getElementType()); 5302 } else { 5303 ReplacementTypes.push_back(PrivType); 5304 } 5305 } 5306 5307 /// Initialize \p Base according to the type \p PrivType at position \p IP. 5308 /// The values needed are taken from the arguments of \p F starting at 5309 /// position \p ArgNo.
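// For example, privatizing a struct with two elements introduces two new
// arguments; the first is stored at offset 0 of \p Base and the second at the
// struct-layout offset of element 1.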
5310 static void createInitialization(Type *PrivType, Value &Base, Function &F, 5311 unsigned ArgNo, Instruction &IP) { 5312 assert(PrivType && "Expected privatizable type!"); 5313 5314 IRBuilder<NoFolder> IRB(&IP); 5315 const DataLayout &DL = F.getParent()->getDataLayout(); 5316 5317 // Traverse the type, build GEPs and stores. 5318 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5319 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5320 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5321 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 5322 Value *Ptr = constructPointer( 5323 PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL); 5324 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5325 } 5326 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5327 Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo(); 5328 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy); 5329 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5330 Value *Ptr = 5331 constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL); 5332 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5333 } 5334 } else { 5335 new StoreInst(F.getArg(ArgNo), &Base, &IP); 5336 } 5337 } 5338 5339 /// Extract values from \p Base according to the type \p PrivType at the 5340 /// call position \p ACS. The values are appended to \p ReplacementValues. 5341 void createReplacementValues(Align Alignment, Type *PrivType, 5342 AbstractCallSite ACS, Value *Base, 5343 SmallVectorImpl<Value *> &ReplacementValues) { 5344 assert(Base && "Expected base value!"); 5345 assert(PrivType && "Expected privatizable type!"); 5346 Instruction *IP = ACS.getInstruction(); 5347 5348 IRBuilder<NoFolder> IRB(IP); 5349 const DataLayout &DL = IP->getModule()->getDataLayout(); 5350 5351 if (Base->getType()->getPointerElementType() != PrivType) 5352 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 5353 "", ACS.getInstruction()); 5354 5355 // Traverse the type, build GEPs and loads. 5356 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5357 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5358 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5359 Type *PointeeTy = PrivStructType->getElementType(u); 5360 Value *Ptr = 5361 constructPointer(PointeeTy->getPointerTo(), Base, 5362 PrivStructLayout->getElementOffset(u), IRB, DL); 5363 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 5364 L->setAlignment(Alignment); 5365 ReplacementValues.push_back(L); 5366 } 5367 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5368 Type *PointeeTy = PrivArrayType->getElementType(); 5369 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 5370 Type *PointeePtrTy = PointeeTy->getPointerTo(); 5371 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5372 Value *Ptr = 5373 constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL); 5374 LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP); 5375 L->setAlignment(Alignment); 5376 ReplacementValues.push_back(L); 5377 } 5378 } else { 5379 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 5380 L->setAlignment(Alignment); 5381 ReplacementValues.push_back(L); 5382 } 5383 } 5384 5385 /// See AbstractAttribute::manifest(...) 
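// Manifesting registers a function signature rewrite: the callee repair
// callback creates a private alloca in the entry block and initializes it
// from the new arguments, while the call site repair callback loads the
// replacement values immediately before each call.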
5386 ChangeStatus manifest(Attributor &A) override { 5387 if (!PrivatizableType.hasValue()) 5388 return ChangeStatus::UNCHANGED; 5389 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 5390 5391 // Collect all tail calls in the function as we cannot allow new allocas to 5392 // escape into tail recursion. 5393 // TODO: Be smarter about new allocas escaping into tail calls. 5394 SmallVector<CallInst *, 16> TailCalls; 5395 if (!A.checkForAllInstructions( 5396 [&](Instruction &I) { 5397 CallInst &CI = cast<CallInst>(I); 5398 if (CI.isTailCall()) 5399 TailCalls.push_back(&CI); 5400 return true; 5401 }, 5402 *this, {Instruction::Call})) 5403 return ChangeStatus::UNCHANGED; 5404 5405 Argument *Arg = getAssociatedArgument(); 5406 // Query AAAlign attribute for alignment of associated argument to 5407 // determine the best alignment of loads. 5408 const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg)); 5409 5410 // Callback to repair the associated function. A new alloca is placed at the 5411 // beginning and initialized with the values passed through arguments. The 5412 // new alloca replaces the use of the old pointer argument. 5413 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 5414 [=](const Attributor::ArgumentReplacementInfo &ARI, 5415 Function &ReplacementFn, Function::arg_iterator ArgIt) { 5416 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 5417 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 5418 auto *AI = new AllocaInst(PrivatizableType.getValue(), 0, 5419 Arg->getName() + ".priv", IP); 5420 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 5421 ArgIt->getArgNo(), *IP); 5422 Arg->replaceAllUsesWith(AI); 5423 5424 for (CallInst *CI : TailCalls) 5425 CI->setTailCall(false); 5426 }; 5427 5428 // Callback to repair a call site of the associated function. The elements 5429 // of the privatizable type are loaded prior to the call and passed to the 5430 // new function version. 5431 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 5432 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 5433 AbstractCallSite ACS, 5434 SmallVectorImpl<Value *> &NewArgOperands) { 5435 // When no alignment is specified for the load instruction, 5436 // natural alignment is assumed. 5437 createReplacementValues( 5438 assumeAligned(AlignAA.getAssumedAlign()), 5439 PrivatizableType.getValue(), ACS, 5440 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 5441 NewArgOperands); 5442 }; 5443 5444 // Collect the types that will replace the privatizable type in the function 5445 // signature. 5446 SmallVector<Type *, 16> ReplacementTypes; 5447 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5448 5449 // Register a rewrite of the argument. 5450 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 5451 std::move(FnRepairCB), 5452 std::move(ACSRepairCB))) 5453 return ChangeStatus::CHANGED; 5454 return ChangeStatus::UNCHANGED; 5455 } 5456 5457 /// See AbstractAttribute::trackStatistics() 5458 void trackStatistics() const override { 5459 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 5460 } 5461 }; 5462 5463 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 5464 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 5465 : AAPrivatizablePtrImpl(IRP, A) {} 5466 5467 /// See AbstractAttribute::initialize(...). 5468 virtual void initialize(Attributor &A) override { 5469 // TODO: We can privatize more than arguments. 
5470 indicatePessimisticFixpoint(); 5471 } 5472 5473 ChangeStatus updateImpl(Attributor &A) override { 5474 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 5475 "updateImpl will not be called"); 5476 } 5477 5478 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5479 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5480 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 5481 if (!Obj) { 5482 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 5483 return nullptr; 5484 } 5485 5486 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 5487 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 5488 if (CI->isOne()) 5489 return Obj->getType()->getPointerElementType(); 5490 if (auto *Arg = dyn_cast<Argument>(Obj)) { 5491 auto &PrivArgAA = 5492 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg)); 5493 if (PrivArgAA.isAssumedPrivatizablePtr()) 5494 return Obj->getType()->getPointerElementType(); 5495 } 5496 5497 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 5498 "alloca nor privatizable argument: " 5499 << *Obj << "!\n"); 5500 return nullptr; 5501 } 5502 5503 /// See AbstractAttribute::trackStatistics() 5504 void trackStatistics() const override { 5505 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 5506 } 5507 }; 5508 5509 struct AAPrivatizablePtrCallSiteArgument final 5510 : public AAPrivatizablePtrFloating { 5511 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 5512 : AAPrivatizablePtrFloating(IRP, A) {} 5513 5514 /// See AbstractAttribute::initialize(...). 5515 void initialize(Attributor &A) override { 5516 if (getIRPosition().hasAttr(Attribute::ByVal)) 5517 indicateOptimisticFixpoint(); 5518 } 5519 5520 /// See AbstractAttribute::updateImpl(...). 5521 ChangeStatus updateImpl(Attributor &A) override { 5522 PrivatizableType = identifyPrivatizableType(A); 5523 if (!PrivatizableType.hasValue()) 5524 return ChangeStatus::UNCHANGED; 5525 if (!PrivatizableType.getValue()) 5526 return indicatePessimisticFixpoint(); 5527 5528 const IRPosition &IRP = getIRPosition(); 5529 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP); 5530 if (!NoCaptureAA.isAssumedNoCapture()) { 5531 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 5532 return indicatePessimisticFixpoint(); 5533 } 5534 5535 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP); 5536 if (!NoAliasAA.isAssumedNoAlias()) { 5537 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 5538 return indicatePessimisticFixpoint(); 5539 } 5540 5541 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP); 5542 if (!MemBehaviorAA.isAssumedReadOnly()) { 5543 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 5544 return indicatePessimisticFixpoint(); 5545 } 5546 5547 return ChangeStatus::UNCHANGED; 5548 } 5549 5550 /// See AbstractAttribute::trackStatistics() 5551 void trackStatistics() const override { 5552 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 5553 } 5554 }; 5555 5556 struct AAPrivatizablePtrCallSiteReturned final 5557 : public AAPrivatizablePtrFloating { 5558 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 5559 : AAPrivatizablePtrFloating(IRP, A) {} 5560 5561 /// See AbstractAttribute::initialize(...). 5562 void initialize(Attributor &A) override { 5563 // TODO: We can privatize more than arguments. 
5564 indicatePessimisticFixpoint(); 5565 } 5566 5567 /// See AbstractAttribute::trackStatistics() 5568 void trackStatistics() const override { 5569 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 5570 } 5571 }; 5572 5573 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 5574 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 5575 : AAPrivatizablePtrFloating(IRP, A) {} 5576 5577 /// See AbstractAttribute::initialize(...). 5578 void initialize(Attributor &A) override { 5579 // TODO: We can privatize more than arguments. 5580 indicatePessimisticFixpoint(); 5581 } 5582 5583 /// See AbstractAttribute::trackStatistics() 5584 void trackStatistics() const override { 5585 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 5586 } 5587 }; 5588 5589 /// -------------------- Memory Behavior Attributes ---------------------------- 5590 /// Includes read-none, read-only, and write-only. 5591 /// ---------------------------------------------------------------------------- 5592 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 5593 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 5594 : AAMemoryBehavior(IRP, A) {} 5595 5596 /// See AbstractAttribute::initialize(...). 5597 void initialize(Attributor &A) override { 5598 intersectAssumedBits(BEST_STATE); 5599 getKnownStateFromValue(getIRPosition(), getState()); 5600 IRAttribute::initialize(A); 5601 } 5602 5603 /// Return the memory behavior information encoded in the IR for \p IRP. 5604 static void getKnownStateFromValue(const IRPosition &IRP, 5605 BitIntegerState &State, 5606 bool IgnoreSubsumingPositions = false) { 5607 SmallVector<Attribute, 2> Attrs; 5608 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 5609 for (const Attribute &Attr : Attrs) { 5610 switch (Attr.getKindAsEnum()) { 5611 case Attribute::ReadNone: 5612 State.addKnownBits(NO_ACCESSES); 5613 break; 5614 case Attribute::ReadOnly: 5615 State.addKnownBits(NO_WRITES); 5616 break; 5617 case Attribute::WriteOnly: 5618 State.addKnownBits(NO_READS); 5619 break; 5620 default: 5621 llvm_unreachable("Unexpected attribute!"); 5622 } 5623 } 5624 5625 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 5626 if (!I->mayReadFromMemory()) 5627 State.addKnownBits(NO_READS); 5628 if (!I->mayWriteToMemory()) 5629 State.addKnownBits(NO_WRITES); 5630 } 5631 } 5632 5633 /// See AbstractAttribute::getDeducedAttributes(...). 5634 void getDeducedAttributes(LLVMContext &Ctx, 5635 SmallVectorImpl<Attribute> &Attrs) const override { 5636 assert(Attrs.size() == 0); 5637 if (isAssumedReadNone()) 5638 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 5639 else if (isAssumedReadOnly()) 5640 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 5641 else if (isAssumedWriteOnly()) 5642 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 5643 assert(Attrs.size() <= 1); 5644 } 5645 5646 /// See AbstractAttribute::manifest(...). 5647 ChangeStatus manifest(Attributor &A) override { 5648 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 5649 return ChangeStatus::UNCHANGED; 5650 5651 const IRPosition &IRP = getIRPosition(); 5652 5653 // Check if we would improve the existing attributes first. 
5654 SmallVector<Attribute, 4> DeducedAttrs; 5655 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 5656 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 5657 return IRP.hasAttr(Attr.getKindAsEnum(), 5658 /* IgnoreSubsumingPositions */ true); 5659 })) 5660 return ChangeStatus::UNCHANGED; 5661 5662 // Clear existing attributes. 5663 IRP.removeAttrs(AttrKinds); 5664 5665 // Use the generic manifest method. 5666 return IRAttribute::manifest(A); 5667 } 5668 5669 /// See AbstractState::getAsStr(). 5670 const std::string getAsStr() const override { 5671 if (isAssumedReadNone()) 5672 return "readnone"; 5673 if (isAssumedReadOnly()) 5674 return "readonly"; 5675 if (isAssumedWriteOnly()) 5676 return "writeonly"; 5677 return "may-read/write"; 5678 } 5679 5680 /// The set of IR attributes AAMemoryBehavior deals with. 5681 static const Attribute::AttrKind AttrKinds[3]; 5682 }; 5683 5684 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 5685 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 5686 5687 /// Memory behavior attribute for a floating value. 5688 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 5689 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 5690 : AAMemoryBehaviorImpl(IRP, A) {} 5691 5692 /// See AbstractAttribute::initialize(...). 5693 void initialize(Attributor &A) override { 5694 AAMemoryBehaviorImpl::initialize(A); 5695 // Initialize the use vector with all direct uses of the associated value. 5696 for (const Use &U : getAssociatedValue().uses()) 5697 Uses.insert(&U); 5698 } 5699 5700 /// See AbstractAttribute::updateImpl(...). 5701 ChangeStatus updateImpl(Attributor &A) override; 5702 5703 /// See AbstractAttribute::trackStatistics() 5704 void trackStatistics() const override { 5705 if (isAssumedReadNone()) 5706 STATS_DECLTRACK_FLOATING_ATTR(readnone) 5707 else if (isAssumedReadOnly()) 5708 STATS_DECLTRACK_FLOATING_ATTR(readonly) 5709 else if (isAssumedWriteOnly()) 5710 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 5711 } 5712 5713 private: 5714 /// Return true if users of \p UserI might access the underlying 5715 /// variable/location described by \p U and should therefore be analyzed. 5716 bool followUsersOfUseIn(Attributor &A, const Use *U, 5717 const Instruction *UserI); 5718 5719 /// Update the state according to the effect of use \p U in \p UserI. 5720 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI); 5721 5722 protected: 5723 /// Container for (transitive) uses of the associated argument. 5724 SetVector<const Use *> Uses; 5725 }; 5726 5727 /// Memory behavior attribute for function argument. 5728 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 5729 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 5730 : AAMemoryBehaviorFloating(IRP, A) {} 5731 5732 /// See AbstractAttribute::initialize(...). 5733 void initialize(Attributor &A) override { 5734 intersectAssumedBits(BEST_STATE); 5735 const IRPosition &IRP = getIRPosition(); 5736 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 5737 // can query it when we use has/getAttr. That would allow us to reuse the 5738 // initialize of the base class here. 5739 bool HasByVal = 5740 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 5741 getKnownStateFromValue(IRP, getState(), 5742 /* IgnoreSubsumingPositions */ HasByVal); 5743 5744 // Initialize the use vector with all direct uses of the associated value. 
5745 Argument *Arg = getAssociatedArgument(); 5746 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) { 5747 indicatePessimisticFixpoint(); 5748 } else { 5749 // Initialize the use vector with all direct uses of the associated value. 5750 for (const Use &U : Arg->uses()) 5751 Uses.insert(&U); 5752 } 5753 } 5754 5755 ChangeStatus manifest(Attributor &A) override { 5756 // TODO: Pointer arguments are not supported on vectors of pointers yet. 5757 if (!getAssociatedValue().getType()->isPointerTy()) 5758 return ChangeStatus::UNCHANGED; 5759 5760 // TODO: From readattrs.ll: "inalloca parameters are always 5761 // considered written" 5762 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 5763 removeKnownBits(NO_WRITES); 5764 removeAssumedBits(NO_WRITES); 5765 } 5766 return AAMemoryBehaviorFloating::manifest(A); 5767 } 5768 5769 /// See AbstractAttribute::trackStatistics() 5770 void trackStatistics() const override { 5771 if (isAssumedReadNone()) 5772 STATS_DECLTRACK_ARG_ATTR(readnone) 5773 else if (isAssumedReadOnly()) 5774 STATS_DECLTRACK_ARG_ATTR(readonly) 5775 else if (isAssumedWriteOnly()) 5776 STATS_DECLTRACK_ARG_ATTR(writeonly) 5777 } 5778 }; 5779 5780 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 5781 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 5782 : AAMemoryBehaviorArgument(IRP, A) {} 5783 5784 /// See AbstractAttribute::initialize(...). 5785 void initialize(Attributor &A) override { 5786 if (Argument *Arg = getAssociatedArgument()) { 5787 if (Arg->hasByValAttr()) { 5788 addKnownBits(NO_WRITES); 5789 removeKnownBits(NO_READS); 5790 removeAssumedBits(NO_READS); 5791 } 5792 } 5793 AAMemoryBehaviorArgument::initialize(A); 5794 } 5795 5796 /// See AbstractAttribute::updateImpl(...). 5797 ChangeStatus updateImpl(Attributor &A) override { 5798 // TODO: Once we have call site specific value information we can provide 5799 // call site specific liveness information and then it makes 5800 // sense to specialize attributes for call site arguments instead of 5801 // redirecting requests to the callee argument. 5802 Argument *Arg = getAssociatedArgument(); 5803 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5804 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos); 5805 return clampStateAndIndicateChange( 5806 getState(), 5807 static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState())); 5808 } 5809 5810 /// See AbstractAttribute::trackStatistics() 5811 void trackStatistics() const override { 5812 if (isAssumedReadNone()) 5813 STATS_DECLTRACK_CSARG_ATTR(readnone) 5814 else if (isAssumedReadOnly()) 5815 STATS_DECLTRACK_CSARG_ATTR(readonly) 5816 else if (isAssumedWriteOnly()) 5817 STATS_DECLTRACK_CSARG_ATTR(writeonly) 5818 } 5819 }; 5820 5821 /// Memory behavior attribute for a call site return position. 5822 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { 5823 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) 5824 : AAMemoryBehaviorFloating(IRP, A) {} 5825 5826 /// See AbstractAttribute::manifest(...). 5827 ChangeStatus manifest(Attributor &A) override { 5828 // We do not annotate returned values. 5829 return ChangeStatus::UNCHANGED; 5830 } 5831 5832 /// See AbstractAttribute::trackStatistics() 5833 void trackStatistics() const override {} 5834 }; 5835 5836 /// An AA to represent the memory behavior function attributes.
5837 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { 5838 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) 5839 : AAMemoryBehaviorImpl(IRP, A) {} 5840 5841 /// See AbstractAttribute::updateImpl(Attributor &A). 5842 virtual ChangeStatus updateImpl(Attributor &A) override; 5843 5844 /// See AbstractAttribute::manifest(...). 5845 ChangeStatus manifest(Attributor &A) override { 5846 Function &F = cast<Function>(getAnchorValue()); 5847 if (isAssumedReadNone()) { 5848 F.removeFnAttr(Attribute::ArgMemOnly); 5849 F.removeFnAttr(Attribute::InaccessibleMemOnly); 5850 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); 5851 } 5852 return AAMemoryBehaviorImpl::manifest(A); 5853 } 5854 5855 /// See AbstractAttribute::trackStatistics() 5856 void trackStatistics() const override { 5857 if (isAssumedReadNone()) 5858 STATS_DECLTRACK_FN_ATTR(readnone) 5859 else if (isAssumedReadOnly()) 5860 STATS_DECLTRACK_FN_ATTR(readonly) 5861 else if (isAssumedWriteOnly()) 5862 STATS_DECLTRACK_FN_ATTR(writeonly) 5863 } 5864 }; 5865 5866 /// AAMemoryBehavior attribute for call sites. 5867 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl { 5868 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) 5869 : AAMemoryBehaviorImpl(IRP, A) {} 5870 5871 /// See AbstractAttribute::initialize(...). 5872 void initialize(Attributor &A) override { 5873 AAMemoryBehaviorImpl::initialize(A); 5874 Function *F = getAssociatedFunction(); 5875 if (!F || !A.isFunctionIPOAmendable(*F)) { 5876 indicatePessimisticFixpoint(); 5877 return; 5878 } 5879 } 5880 5881 /// See AbstractAttribute::updateImpl(...). 5882 ChangeStatus updateImpl(Attributor &A) override { 5883 // TODO: Once we have call site specific value information we can provide 5884 // call site specific liveness information and then it makes 5885 // sense to specialize attributes for call site arguments instead of 5886 // redirecting requests to the callee argument. 5887 Function *F = getAssociatedFunction(); 5888 const IRPosition &FnPos = IRPosition::function(*F); 5889 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos); 5890 return clampStateAndIndicateChange( 5891 getState(), 5892 static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState())); 5893 } 5894 5895 /// See AbstractAttribute::trackStatistics() 5896 void trackStatistics() const override { 5897 if (isAssumedReadNone()) 5898 STATS_DECLTRACK_CS_ATTR(readnone) 5899 else if (isAssumedReadOnly()) 5900 STATS_DECLTRACK_CS_ATTR(readonly) 5901 else if (isAssumedWriteOnly()) 5902 STATS_DECLTRACK_CS_ATTR(writeonly) 5903 } 5904 }; 5905 5906 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { 5907 5908 // The current assumed state used to determine a change. 5909 auto AssumedState = getAssumed(); 5910 5911 auto CheckRWInst = [&](Instruction &I) { 5912 // If the instruction has its own memory behavior state, use it to restrict 5913 // the local state. No further analysis is required as the other memory 5914 // state is as optimistic as it gets. 5915 if (const auto *CB = dyn_cast<CallBase>(&I)) { 5916 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 5917 *this, IRPosition::callsite_function(*CB)); 5918 intersectAssumedBits(MemBehaviorAA.getAssumed()); 5919 return !isAtFixpoint(); 5920 } 5921 5922 // Remove access kind modifiers if necessary.
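// E.g., an instruction that may read clears NO_READS and one that may write
// clears NO_WRITES.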
5923 if (I.mayReadFromMemory()) 5924 removeAssumedBits(NO_READS); 5925 if (I.mayWriteToMemory()) 5926 removeAssumedBits(NO_WRITES); 5927 return !isAtFixpoint(); 5928 }; 5929 5930 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 5931 return indicatePessimisticFixpoint(); 5932 5933 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED 5934 : ChangeStatus::UNCHANGED; 5935 } 5936 5937 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) { 5938 5939 const IRPosition &IRP = getIRPosition(); 5940 const IRPosition &FnPos = IRPosition::function_scope(IRP); 5941 AAMemoryBehavior::StateType &S = getState(); 5942 5943 // First, check the function scope. We take the known information and we avoid 5944 // work if the assumed information implies the current assumed information for 5945 // this attribute. This is valid for all but byval arguments. 5946 Argument *Arg = IRP.getAssociatedArgument(); 5947 AAMemoryBehavior::base_t FnMemAssumedState = 5948 AAMemoryBehavior::StateType::getWorstState(); 5949 if (!Arg || !Arg->hasByValAttr()) { 5950 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>( 5951 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5952 FnMemAssumedState = FnMemAA.getAssumed(); 5953 S.addKnownBits(FnMemAA.getKnown()); 5954 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed()) 5955 return ChangeStatus::UNCHANGED; 5956 } 5957 5958 // Make sure the value is not captured (except through "return"); if 5959 // it is, any information derived would be irrelevant anyway as we cannot 5960 // check the potential aliases introduced by the capture. However, no need 5961 // to fall back to anything less optimistic than the function state. 5962 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 5963 *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5964 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 5965 S.intersectAssumedBits(FnMemAssumedState); 5966 return ChangeStatus::CHANGED; 5967 } 5968 5969 // The current assumed state used to determine a change. 5970 auto AssumedState = S.getAssumed(); 5971 5972 // Liveness information to exclude dead users. 5973 // TODO: Take the FnPos once we have call site specific liveness information. 5974 const auto &LivenessAA = A.getAAFor<AAIsDead>( 5975 *this, IRPosition::function(*IRP.getAssociatedFunction()), 5976 /* TrackDependence */ false); 5977 5978 // Visit and expand uses until all are analyzed or a fixpoint is reached. 5979 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) { 5980 const Use *U = Uses[i]; 5981 Instruction *UserI = cast<Instruction>(U->getUser()); 5982 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI 5983 << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA)) 5984 << "]\n"); 5985 if (A.isAssumedDead(*U, this, &LivenessAA)) 5986 continue; 5987 5988 // Droppable users, e.g., llvm::assume, do not actually perform any action. 5989 if (UserI->isDroppable()) 5990 continue; 5991 5992 // Check if the users of UserI should also be visited. 5993 if (followUsersOfUseIn(A, U, UserI)) 5994 for (const Use &UserIUse : UserI->uses()) 5995 Uses.insert(&UserIUse); 5996 5997 // If UserI might touch memory we analyze the use in detail. 5998 if (UserI->mayReadOrWriteMemory()) 5999 analyzeUseIn(A, U, UserI); 6000 } 6001 6002 return (AssumedState != getAssumed()) ?
ChangeStatus::CHANGED 6003 : ChangeStatus::UNCHANGED; 6004 } 6005 6006 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U, 6007 const Instruction *UserI) { 6008 // The loaded value is unrelated to the pointer argument; no need to 6009 // follow the users of the load. 6010 if (isa<LoadInst>(UserI)) 6011 return false; 6012 6013 // By default we follow all uses assuming UserI might leak information on U; 6014 // we have special handling for call site operands though. 6015 const auto *CB = dyn_cast<CallBase>(UserI); 6016 if (!CB || !CB->isArgOperand(U)) 6017 return true; 6018 6019 // If the use is a call argument known not to be captured, the users of 6020 // the call do not need to be visited because they have to be unrelated to 6021 // the input. Note that this check is not trivial even though we disallow 6022 // general capturing of the underlying argument. The reason is that the 6023 // call might capture the argument "through return", which we allow and for which we 6024 // need to check call users. 6025 if (U->get()->getType()->isPointerTy()) { 6026 unsigned ArgNo = CB->getArgOperandNo(U); 6027 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 6028 *this, IRPosition::callsite_argument(*CB, ArgNo), 6029 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6030 return !ArgNoCaptureAA.isAssumedNoCapture(); 6031 } 6032 6033 return true; 6034 } 6035 6036 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U, 6037 const Instruction *UserI) { 6038 assert(UserI->mayReadOrWriteMemory()); 6039 6040 switch (UserI->getOpcode()) { 6041 default: 6042 // TODO: Handle all atomics and other side-effect operations we know of. 6043 break; 6044 case Instruction::Load: 6045 // Loads cause the NO_READS property to disappear. 6046 removeAssumedBits(NO_READS); 6047 return; 6048 6049 case Instruction::Store: 6050 // Stores cause the NO_WRITES property to disappear if the use is the 6051 // pointer operand. Note that we do assume that capturing was taken care of 6052 // somewhere else. 6053 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get()) 6054 removeAssumedBits(NO_WRITES); 6055 return; 6056 6057 case Instruction::Call: 6058 case Instruction::CallBr: 6059 case Instruction::Invoke: { 6060 // For call sites we look at the argument memory behavior attribute (this 6061 // could be recursive!) in order to restrict our own state. 6062 const auto *CB = cast<CallBase>(UserI); 6063 6064 // Give up on operand bundles. 6065 if (CB->isBundleOperand(U)) { 6066 indicatePessimisticFixpoint(); 6067 return; 6068 } 6069 6070 // Calling a function does read the function pointer, maybe write it if the 6071 // function is self-modifying. 6072 if (CB->isCallee(U)) { 6073 removeAssumedBits(NO_READS); 6074 break; 6075 } 6076 6077 // Adjust the possible access behavior based on the information on the 6078 // argument. 6079 IRPosition Pos; 6080 if (U->get()->getType()->isPointerTy()) 6081 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U)); 6082 else 6083 Pos = IRPosition::callsite_function(*CB); 6084 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6085 *this, Pos, 6086 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6087 // "assumed" has at most the same bits as the MemBehaviorAA assumed 6088 // and at least "known". 6089 intersectAssumedBits(MemBehaviorAA.getAssumed()); 6090 return; 6091 } 6092 }; 6093 6094 // Generally, look at the "may-properties" and adjust the assumed state if we 6095 // did not trigger special handling before.
6096 if (UserI->mayReadFromMemory()) 6097 removeAssumedBits(NO_READS); 6098 if (UserI->mayWriteToMemory()) 6099 removeAssumedBits(NO_WRITES); 6100 } 6101 6102 } // namespace 6103 6104 /// -------------------- Memory Locations Attributes --------------------------- 6105 /// Includes read-none, argmemonly, inaccessiblememonly, 6106 /// inaccessiblememorargmemonly 6107 /// ---------------------------------------------------------------------------- 6108 6109 std::string AAMemoryLocation::getMemoryLocationsAsStr( 6110 AAMemoryLocation::MemoryLocationsKind MLK) { 6111 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 6112 return "all memory"; 6113 if (MLK == AAMemoryLocation::NO_LOCATIONS) 6114 return "no memory"; 6115 std::string S = "memory:"; 6116 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 6117 S += "stack,"; 6118 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 6119 S += "constant,"; 6120 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 6121 S += "internal global,"; 6122 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 6123 S += "external global,"; 6124 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 6125 S += "argument,"; 6126 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 6127 S += "inaccessible,"; 6128 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 6129 S += "malloced,"; 6130 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 6131 S += "unknown,"; 6132 S.pop_back(); 6133 return S; 6134 } 6135 6136 namespace { 6137 struct AAMemoryLocationImpl : public AAMemoryLocation { 6138 6139 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 6140 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 6141 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6142 AccessKind2Accesses[u] = nullptr; 6143 } 6144 6145 ~AAMemoryLocationImpl() { 6146 // The AccessSets are allocated via a BumpPtrAllocator, we call 6147 // the destructor manually. 6148 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6149 if (AccessKind2Accesses[u]) 6150 AccessKind2Accesses[u]->~AccessSet(); 6151 } 6152 6153 /// See AbstractAttribute::initialize(...). 6154 void initialize(Attributor &A) override { 6155 intersectAssumedBits(BEST_STATE); 6156 getKnownStateFromValue(A, getIRPosition(), getState()); 6157 IRAttribute::initialize(A); 6158 } 6159 6160 /// Return the memory behavior information encoded in the IR for \p IRP. 6161 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 6162 BitIntegerState &State, 6163 bool IgnoreSubsumingPositions = false) { 6164 // For internal functions we ignore `argmemonly` and 6165 // `inaccessiblememorargmemonly` as we might break it via interprocedural 6166 // constant propagation. It is unclear if this is the best way but it is 6167 // unlikely this will cause real performance problems. If we are deriving 6168 // attributes for the anchor function we even remove the attribute in 6169 // addition to ignoring it. 
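    // (Illustrative reasoning: if interprocedural constant propagation
    // replaces a pointer argument of an internal function with a global, an
    // `argmemonly` deduced for the argument-based version would suddenly
    // describe an access to global memory, which is why we ignore, and for the
    // anchor function remove, the attribute here.)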
6170 bool UseArgMemOnly = true; 6171 Function *AnchorFn = IRP.getAnchorScope(); 6172 if (AnchorFn && A.isRunOn(*AnchorFn)) 6173 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 6174 6175 SmallVector<Attribute, 2> Attrs; 6176 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6177 for (const Attribute &Attr : Attrs) { 6178 switch (Attr.getKindAsEnum()) { 6179 case Attribute::ReadNone: 6180 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 6181 break; 6182 case Attribute::InaccessibleMemOnly: 6183 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 6184 break; 6185 case Attribute::ArgMemOnly: 6186 if (UseArgMemOnly) 6187 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 6188 else 6189 IRP.removeAttrs({Attribute::ArgMemOnly}); 6190 break; 6191 case Attribute::InaccessibleMemOrArgMemOnly: 6192 if (UseArgMemOnly) 6193 State.addKnownBits(inverseLocation( 6194 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 6195 else 6196 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 6197 break; 6198 default: 6199 llvm_unreachable("Unexpected attribute!"); 6200 } 6201 } 6202 } 6203 6204 /// See AbstractAttribute::getDeducedAttributes(...). 6205 void getDeducedAttributes(LLVMContext &Ctx, 6206 SmallVectorImpl<Attribute> &Attrs) const override { 6207 assert(Attrs.size() == 0); 6208 if (isAssumedReadNone()) { 6209 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6210 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 6211 if (isAssumedInaccessibleMemOnly()) 6212 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 6213 else if (isAssumedArgMemOnly()) 6214 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 6215 else if (isAssumedInaccessibleOrArgMemOnly()) 6216 Attrs.push_back( 6217 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 6218 } 6219 assert(Attrs.size() <= 1); 6220 } 6221 6222 /// See AbstractAttribute::manifest(...). 6223 ChangeStatus manifest(Attributor &A) override { 6224 const IRPosition &IRP = getIRPosition(); 6225 6226 // Check if we would improve the existing attributes first. 6227 SmallVector<Attribute, 4> DeducedAttrs; 6228 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6229 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6230 return IRP.hasAttr(Attr.getKindAsEnum(), 6231 /* IgnoreSubsumingPositions */ true); 6232 })) 6233 return ChangeStatus::UNCHANGED; 6234 6235 // Clear existing attributes. 6236 IRP.removeAttrs(AttrKinds); 6237 if (isAssumedReadNone()) 6238 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 6239 6240 // Use the generic manifest method. 6241 return IRAttribute::manifest(A); 6242 } 6243 6244 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
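  /// Usage sketch (hypothetical caller `MemLocAA`; mirrors the use in
  /// categorizeAccessedLocations below): to visit every recorded access that
  /// may touch global memory, pass the inverse location set, e.g.,
  ///   MemLocAA.checkForAllAccessesToMemoryKind(
  ///       [](const Instruction *I, const Value *Ptr, AccessKind AK,
  ///          MemoryLocationsKind MLK) { return true; },
  ///       inverseLocation(NO_GLOBAL_MEM, false, false));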
6245 bool checkForAllAccessesToMemoryKind( 6246 function_ref<bool(const Instruction *, const Value *, AccessKind, 6247 MemoryLocationsKind)> 6248 Pred, 6249 MemoryLocationsKind RequestedMLK) const override { 6250 if (!isValidState()) 6251 return false; 6252 6253 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 6254 if (AssumedMLK == NO_LOCATIONS) 6255 return true; 6256 6257 unsigned Idx = 0; 6258 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 6259 CurMLK *= 2, ++Idx) { 6260 if (CurMLK & RequestedMLK) 6261 continue; 6262 6263 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 6264 for (const AccessInfo &AI : *Accesses) 6265 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 6266 return false; 6267 } 6268 6269 return true; 6270 } 6271 6272 ChangeStatus indicatePessimisticFixpoint() override { 6273 // If we give up and indicate a pessimistic fixpoint this instruction will 6274 // become an access for all potential access kinds: 6275 // TODO: Add pointers for argmemonly and globals to improve the results of 6276 // checkForAllAccessesToMemoryKind. 6277 bool Changed = false; 6278 MemoryLocationsKind KnownMLK = getKnown(); 6279 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 6280 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 6281 if (!(CurMLK & KnownMLK)) 6282 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 6283 getAccessKindFromInst(I)); 6284 return AAMemoryLocation::indicatePessimisticFixpoint(); 6285 } 6286 6287 protected: 6288 /// Helper struct to tie together an instruction that has a read or write 6289 /// effect with the pointer it accesses (if any). 6290 struct AccessInfo { 6291 6292 /// The instruction that caused the access. 6293 const Instruction *I; 6294 6295 /// The base pointer that is accessed, or null if unknown. 6296 const Value *Ptr; 6297 6298 /// The kind of access (read/write/read+write). 6299 AccessKind Kind; 6300 6301 bool operator==(const AccessInfo &RHS) const { 6302 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 6303 } 6304 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 6305 if (LHS.I != RHS.I) 6306 return LHS.I < RHS.I; 6307 if (LHS.Ptr != RHS.Ptr) 6308 return LHS.Ptr < RHS.Ptr; 6309 if (LHS.Kind != RHS.Kind) 6310 return LHS.Kind < RHS.Kind; 6311 return false; 6312 } 6313 }; 6314 6315 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 6316 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 6317 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 6318 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 6319 6320 /// Return the kind(s) of location that may be accessed by \p V. 6321 AAMemoryLocation::MemoryLocationsKind 6322 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 6323 6324 /// Return the access kind as determined by \p I. 6325 AccessKind getAccessKindFromInst(const Instruction *I) { 6326 AccessKind AK = READ_WRITE; 6327 if (I) { 6328 AK = I->mayReadFromMemory() ? READ : NONE; 6329 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE)); 6330 } 6331 return AK; 6332 } 6333 6334 /// Update the state \p State and the AccessKind2Accesses given that \p I is 6335 /// an access of kind \p AK to a \p MLK memory location with the access 6336 /// pointer \p Ptr. 
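  /// For example (sketch, `Store` and `Ptr` are hypothetical names): recording
  /// a store through an alloca-derived pointer would look roughly like
  ///   updateStateAndAccessesMap(State, NO_LOCAL_MEM, &Store, Ptr, Changed,
  ///                             getAccessKindFromInst(&Store));
  /// which remembers the access in AccessKind2Accesses and clears NO_LOCAL_MEM
  /// from the assumed bits of \p State.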
6337 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State, 6338 MemoryLocationsKind MLK, const Instruction *I, 6339 const Value *Ptr, bool &Changed, 6340 AccessKind AK = READ_WRITE) { 6341 6342 assert(isPowerOf2_32(MLK) && "Expected a single location set!"); 6343 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)]; 6344 if (!Accesses) 6345 Accesses = new (Allocator) AccessSet(); 6346 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second; 6347 State.removeAssumedBits(MLK); 6348 } 6349 6350 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or 6351 /// arguments, and update the state and access map accordingly. 6352 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr, 6353 AAMemoryLocation::StateType &State, bool &Changed); 6354 6355 /// Used to allocate access sets. 6356 BumpPtrAllocator &Allocator; 6357 6358 /// The set of IR attributes AAMemoryLocation deals with. 6359 static const Attribute::AttrKind AttrKinds[4]; 6360 }; 6361 6362 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = { 6363 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly, 6364 Attribute::InaccessibleMemOrArgMemOnly}; 6365 6366 void AAMemoryLocationImpl::categorizePtrValue( 6367 Attributor &A, const Instruction &I, const Value &Ptr, 6368 AAMemoryLocation::StateType &State, bool &Changed) { 6369 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for " 6370 << Ptr << " [" 6371 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n"); 6372 6373 auto StripGEPCB = [](Value *V) -> Value * { 6374 auto *GEP = dyn_cast<GEPOperator>(V); 6375 while (GEP) { 6376 V = GEP->getPointerOperand(); 6377 GEP = dyn_cast<GEPOperator>(V); 6378 } 6379 return V; 6380 }; 6381 6382 auto VisitValueCB = [&](Value &V, const Instruction *, 6383 AAMemoryLocation::StateType &T, 6384 bool Stripped) -> bool { 6385 MemoryLocationsKind MLK = NO_LOCATIONS; 6386 assert(!isa<GEPOperator>(V) && "GEPs should have been stripped."); 6387 if (isa<UndefValue>(V)) 6388 return true; 6389 if (auto *Arg = dyn_cast<Argument>(&V)) { 6390 if (Arg->hasByValAttr()) 6391 MLK = NO_LOCAL_MEM; 6392 else 6393 MLK = NO_ARGUMENT_MEM; 6394 } else if (auto *GV = dyn_cast<GlobalValue>(&V)) { 6395 if (GV->hasLocalLinkage()) 6396 MLK = NO_GLOBAL_INTERNAL_MEM; 6397 else 6398 MLK = NO_GLOBAL_EXTERNAL_MEM; 6399 } else if (isa<ConstantPointerNull>(V) && 6400 !NullPointerIsDefined(getAssociatedFunction(), 6401 V.getType()->getPointerAddressSpace())) { 6402 return true; 6403 } else if (isa<AllocaInst>(V)) { 6404 MLK = NO_LOCAL_MEM; 6405 } else if (const auto *CB = dyn_cast<CallBase>(&V)) { 6406 const auto &NoAliasAA = 6407 A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB)); 6408 if (NoAliasAA.isAssumedNoAlias()) 6409 MLK = NO_MALLOCED_MEM; 6410 else 6411 MLK = NO_UNKOWN_MEM; 6412 } else { 6413 MLK = NO_UNKOWN_MEM; 6414 } 6415 6416 assert(MLK != NO_LOCATIONS && "No location specified!"); 6417 updateStateAndAccessesMap(T, MLK, &I, &V, Changed, 6418 getAccessKindFromInst(&I)); 6419 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: " 6420 << V << " -> " << getMemoryLocationsAsStr(T.getAssumed()) 6421 << "\n"); 6422 return true; 6423 }; 6424 6425 if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>( 6426 A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(), 6427 /* UseValueSimplify */ true, 6428 /* MaxValues */ 32, StripGEPCB)) { 6429 LLVM_DEBUG( 6430 dbgs() << "[AAMemoryLocation] Pointer locations not 
categorized\n"); 6431 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed, 6432 getAccessKindFromInst(&I)); 6433 } else { 6434 LLVM_DEBUG( 6435 dbgs() 6436 << "[AAMemoryLocation] Accessed locations with pointer locations: " 6437 << getMemoryLocationsAsStr(State.getAssumed()) << "\n"); 6438 } 6439 } 6440 6441 AAMemoryLocation::MemoryLocationsKind 6442 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I, 6443 bool &Changed) { 6444 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for " 6445 << I << "\n"); 6446 6447 AAMemoryLocation::StateType AccessedLocs; 6448 AccessedLocs.intersectAssumedBits(NO_LOCATIONS); 6449 6450 if (auto *CB = dyn_cast<CallBase>(&I)) { 6451 6452 // First check if we assume any memory is access is visible. 6453 const auto &CBMemLocationAA = 6454 A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB)); 6455 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I 6456 << " [" << CBMemLocationAA << "]\n"); 6457 6458 if (CBMemLocationAA.isAssumedReadNone()) 6459 return NO_LOCATIONS; 6460 6461 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) { 6462 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr, 6463 Changed, getAccessKindFromInst(&I)); 6464 return AccessedLocs.getAssumed(); 6465 } 6466 6467 uint32_t CBAssumedNotAccessedLocs = 6468 CBMemLocationAA.getAssumedNotAccessedLocation(); 6469 6470 // Set the argmemonly and global bit as we handle them separately below. 6471 uint32_t CBAssumedNotAccessedLocsNoArgMem = 6472 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 6473 6474 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 6475 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 6476 continue; 6477 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 6478 getAccessKindFromInst(&I)); 6479 } 6480 6481 // Now handle global memory if it might be accessed. This is slightly tricky 6482 // as NO_GLOBAL_MEM has multiple bits set. 6483 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 6484 if (HasGlobalAccesses) { 6485 auto AccessPred = [&](const Instruction *, const Value *Ptr, 6486 AccessKind Kind, MemoryLocationsKind MLK) { 6487 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 6488 getAccessKindFromInst(&I)); 6489 return true; 6490 }; 6491 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 6492 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 6493 return AccessedLocs.getWorstState(); 6494 } 6495 6496 LLVM_DEBUG( 6497 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 6498 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6499 6500 // Now handle argument memory if it might be accessed. 6501 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 6502 if (HasArgAccesses) { 6503 for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E; 6504 ++ArgNo) { 6505 6506 // Skip non-pointer arguments. 6507 const Value *ArgOp = CB->getArgOperand(ArgNo); 6508 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 6509 continue; 6510 6511 // Skip readnone arguments. 
6512 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo); 6513 const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>( 6514 *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6515 6516 if (ArgOpMemLocationAA.isAssumedReadNone()) 6517 continue; 6518 6519 // Categorize potentially accessed pointer arguments as if there was an 6520 // access instruction with them as pointer. 6521 categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed); 6522 } 6523 } 6524 6525 LLVM_DEBUG( 6526 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 6527 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6528 6529 return AccessedLocs.getAssumed(); 6530 } 6531 6532 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 6533 LLVM_DEBUG( 6534 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 6535 << I << " [" << *Ptr << "]\n"); 6536 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 6537 return AccessedLocs.getAssumed(); 6538 } 6539 6540 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 6541 << I << "\n"); 6542 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 6543 getAccessKindFromInst(&I)); 6544 return AccessedLocs.getAssumed(); 6545 } 6546 6547 /// An AA to represent the memory behavior function attributes. 6548 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 6549 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 6550 : AAMemoryLocationImpl(IRP, A) {} 6551 6552 /// See AbstractAttribute::updateImpl(Attributor &A). 6553 virtual ChangeStatus updateImpl(Attributor &A) override { 6554 6555 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6556 *this, getIRPosition(), /* TrackDependence */ false); 6557 if (MemBehaviorAA.isAssumedReadNone()) { 6558 if (MemBehaviorAA.isKnownReadNone()) 6559 return indicateOptimisticFixpoint(); 6560 assert(isAssumedReadNone() && 6561 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 6562 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 6563 return ChangeStatus::UNCHANGED; 6564 } 6565 6566 // The current assumed state used to determine a change. 6567 auto AssumedState = getAssumed(); 6568 bool Changed = false; 6569 6570 auto CheckRWInst = [&](Instruction &I) { 6571 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed); 6572 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I 6573 << ": " << getMemoryLocationsAsStr(MLK) << "\n"); 6574 removeAssumedBits(inverseLocation(MLK, false, false)); 6575 return true; 6576 }; 6577 6578 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 6579 return indicatePessimisticFixpoint(); 6580 6581 Changed |= AssumedState != getAssumed(); 6582 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6583 } 6584 6585 /// See AbstractAttribute::trackStatistics() 6586 void trackStatistics() const override { 6587 if (isAssumedReadNone()) 6588 STATS_DECLTRACK_FN_ATTR(readnone) 6589 else if (isAssumedArgMemOnly()) 6590 STATS_DECLTRACK_FN_ATTR(argmemonly) 6591 else if (isAssumedInaccessibleMemOnly()) 6592 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly) 6593 else if (isAssumedInaccessibleOrArgMemOnly()) 6594 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly) 6595 } 6596 }; 6597 6598 /// AAMemoryLocation attribute for call sites. 
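/// Call sites do not categorize their own instructions; updateImpl below
/// simply re-queries the callee's AAMemoryLocation and translates its recorded
/// accesses into this position's state.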
6599 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl { 6600 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A) 6601 : AAMemoryLocationImpl(IRP, A) {} 6602 6603 /// See AbstractAttribute::initialize(...). 6604 void initialize(Attributor &A) override { 6605 AAMemoryLocationImpl::initialize(A); 6606 Function *F = getAssociatedFunction(); 6607 if (!F || !A.isFunctionIPOAmendable(*F)) { 6608 indicatePessimisticFixpoint(); 6609 return; 6610 } 6611 } 6612 6613 /// See AbstractAttribute::updateImpl(...). 6614 ChangeStatus updateImpl(Attributor &A) override { 6615 // TODO: Once we have call site specific value information we can provide 6616 // call site specific liveness information and then it makes 6617 // sense to specialize attributes for call site arguments instead of 6618 // redirecting requests to the callee argument. 6619 Function *F = getAssociatedFunction(); 6620 const IRPosition &FnPos = IRPosition::function(*F); 6621 auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos); 6622 bool Changed = false; 6623 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 6624 AccessKind Kind, MemoryLocationsKind MLK) { 6625 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed, 6626 getAccessKindFromInst(I)); 6627 return true; 6628 }; 6629 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS)) 6630 return indicatePessimisticFixpoint(); 6631 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6632 } 6633 6634 /// See AbstractAttribute::trackStatistics() 6635 void trackStatistics() const override { 6636 if (isAssumedReadNone()) 6637 STATS_DECLTRACK_CS_ATTR(readnone) 6638 } 6639 }; 6640 6641 /// ------------------ Value Constant Range Attribute ------------------------- 6642 6643 struct AAValueConstantRangeImpl : AAValueConstantRange { 6644 using StateType = IntegerRangeState; 6645 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A) 6646 : AAValueConstantRange(IRP, A) {} 6647 6648 /// See AbstractAttribute::getAsStr(). 6649 const std::string getAsStr() const override { 6650 std::string Str; 6651 llvm::raw_string_ostream OS(Str); 6652 OS << "range(" << getBitWidth() << ")<"; 6653 getKnown().print(OS); 6654 OS << " / "; 6655 getAssumed().print(OS); 6656 OS << ">"; 6657 return OS.str(); 6658 } 6659 6660 /// Helper function to get a SCEV expr for the associated value at program 6661 /// point \p I. 6662 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const { 6663 if (!getAnchorScope()) 6664 return nullptr; 6665 6666 ScalarEvolution *SE = 6667 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6668 *getAnchorScope()); 6669 6670 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>( 6671 *getAnchorScope()); 6672 6673 if (!SE || !LI) 6674 return nullptr; 6675 6676 const SCEV *S = SE->getSCEV(&getAssociatedValue()); 6677 if (!I) 6678 return S; 6679 6680 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent())); 6681 } 6682 6683 /// Helper function to get a range from SCEV for the associated value at 6684 /// program point \p I.
6685 ConstantRange getConstantRangeFromSCEV(Attributor &A, 6686 const Instruction *I = nullptr) const { 6687 if (!getAnchorScope()) 6688 return getWorstState(getBitWidth()); 6689 6690 ScalarEvolution *SE = 6691 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6692 *getAnchorScope()); 6693 6694 const SCEV *S = getSCEV(A, I); 6695 if (!SE || !S) 6696 return getWorstState(getBitWidth()); 6697 6698 return SE->getUnsignedRange(S); 6699 } 6700 6701 /// Helper function to get a range from LVI for the associated value at 6702 /// program point \p I. 6703 ConstantRange 6704 getConstantRangeFromLVI(Attributor &A, 6705 const Instruction *CtxI = nullptr) const { 6706 if (!getAnchorScope()) 6707 return getWorstState(getBitWidth()); 6708 6709 LazyValueInfo *LVI = 6710 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 6711 *getAnchorScope()); 6712 6713 if (!LVI || !CtxI) 6714 return getWorstState(getBitWidth()); 6715 return LVI->getConstantRange(&getAssociatedValue(), 6716 const_cast<BasicBlock *>(CtxI->getParent()), 6717 const_cast<Instruction *>(CtxI)); 6718 } 6719 6720 /// See AAValueConstantRange::getKnownConstantRange(..). 6721 ConstantRange 6722 getKnownConstantRange(Attributor &A, 6723 const Instruction *CtxI = nullptr) const override { 6724 if (!CtxI || CtxI == getCtxI()) 6725 return getKnown(); 6726 6727 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6728 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6729 return getKnown().intersectWith(SCEVR).intersectWith(LVIR); 6730 } 6731 6732 /// See AAValueConstantRange::getAssumedConstantRange(..). 6733 ConstantRange 6734 getAssumedConstantRange(Attributor &A, 6735 const Instruction *CtxI = nullptr) const override { 6736 // TODO: Make SCEV use Attributor assumption. 6737 // We may be able to bound a variable range via assumptions in 6738 // Attributor. E.g., if x is assumed to be in [1, 3] and y is known to 6739 // evolve to x^2 + x, then we can say that y is in [2, 12]. 6740 6741 if (!CtxI || CtxI == getCtxI()) 6742 return getAssumed(); 6743 6744 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6745 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6746 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR); 6747 } 6748 6749 /// See AbstractAttribute::initialize(..). 6750 void initialize(Attributor &A) override { 6751 // Intersect a range given by SCEV. 6752 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 6753 6754 // Intersect a range given by LVI. 6755 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 6756 } 6757 6758 /// Helper function to create MDNode for range metadata. 6759 static MDNode * 6760 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx, 6761 const ConstantRange &AssumedConstantRange) { 6762 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get( 6763 Ty, AssumedConstantRange.getLower())), 6764 ConstantAsMetadata::get(ConstantInt::get( 6765 Ty, AssumedConstantRange.getUpper()))}; 6766 return MDNode::get(Ctx, LowAndHigh); 6767 } 6768 6769 /// Return true if \p Assumed is a strict improvement over the range encoded in \p KnownRanges (if any). 6770 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) { 6771 6772 if (Assumed.isFullSet()) 6773 return false; 6774 6775 if (!KnownRanges) 6776 return true; 6777 6778 // If multiple ranges are annotated in IR, we give up annotating the assumed 6779 // range for now. 6780 6781 // TODO: If there exists a known range which contains the assumed range, we 6782 // can say the assumed range is better.
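    // E.g. (illustrative): existing !range metadata encoding [0, 100) combined
    // with an assumed range of [10, 20) is an improvement (strictly contained
    // and not equal), whereas an assumed range of [0, 200) is not.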
6783 if (KnownRanges->getNumOperands() > 2) 6784 return false; 6785 6786 ConstantInt *Lower = 6787 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0)); 6788 ConstantInt *Upper = 6789 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1)); 6790 6791 ConstantRange Known(Lower->getValue(), Upper->getValue()); 6792 return Known.contains(Assumed) && Known != Assumed; 6793 } 6794 6795 /// Helper function to set range metadata. 6796 static bool 6797 setRangeMetadataIfisBetterRange(Instruction *I, 6798 const ConstantRange &AssumedConstantRange) { 6799 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 6800 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 6801 if (!AssumedConstantRange.isEmptySet()) { 6802 I->setMetadata(LLVMContext::MD_range, 6803 getMDNodeForConstantRange(I->getType(), I->getContext(), 6804 AssumedConstantRange)); 6805 return true; 6806 } 6807 } 6808 return false; 6809 } 6810 6811 /// See AbstractAttribute::manifest() 6812 ChangeStatus manifest(Attributor &A) override { 6813 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6814 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 6815 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 6816 6817 auto &V = getAssociatedValue(); 6818 if (!AssumedConstantRange.isEmptySet() && 6819 !AssumedConstantRange.isSingleElement()) { 6820 if (Instruction *I = dyn_cast<Instruction>(&V)) 6821 if (isa<CallInst>(I) || isa<LoadInst>(I)) 6822 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 6823 Changed = ChangeStatus::CHANGED; 6824 } 6825 6826 return Changed; 6827 } 6828 }; 6829 6830 struct AAValueConstantRangeArgument final 6831 : AAArgumentFromCallSiteArguments< 6832 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> { 6833 using Base = AAArgumentFromCallSiteArguments< 6834 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>; 6835 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 6836 : Base(IRP, A) {} 6837 6838 /// See AbstractAttribute::initialize(..). 6839 void initialize(Attributor &A) override { 6840 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 6841 indicatePessimisticFixpoint(); 6842 } else { 6843 Base::initialize(A); 6844 } 6845 } 6846 6847 /// See AbstractAttribute::trackStatistics() 6848 void trackStatistics() const override { 6849 STATS_DECLTRACK_ARG_ATTR(value_range) 6850 } 6851 }; 6852 6853 struct AAValueConstantRangeReturned 6854 : AAReturnedFromReturnedValues<AAValueConstantRange, 6855 AAValueConstantRangeImpl> { 6856 using Base = AAReturnedFromReturnedValues<AAValueConstantRange, 6857 AAValueConstantRangeImpl>; 6858 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 6859 : Base(IRP, A) {} 6860 6861 /// See AbstractAttribute::initialize(...). 6862 void initialize(Attributor &A) override {} 6863 6864 /// See AbstractAttribute::trackStatistics() 6865 void trackStatistics() const override { 6866 STATS_DECLTRACK_FNRET_ATTR(value_range) 6867 } 6868 }; 6869 6870 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 6871 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 6872 : AAValueConstantRangeImpl(IRP, A) {} 6873 6874 /// See AbstractAttribute::initialize(...). 
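  /// (Illustrative summary of the cases below: a ConstantInt such as `i32 7`
  /// collapses the state to the singleton range [7, 8) and reaches an
  /// optimistic fixpoint immediately; undef collapses to 0; loads seed the
  /// known range from !range metadata if present.)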
6875 void initialize(Attributor &A) override { 6876 AAValueConstantRangeImpl::initialize(A); 6877 Value &V = getAssociatedValue(); 6878 6879 if (auto *C = dyn_cast<ConstantInt>(&V)) { 6880 unionAssumed(ConstantRange(C->getValue())); 6881 indicateOptimisticFixpoint(); 6882 return; 6883 } 6884 6885 if (isa<UndefValue>(&V)) { 6886 // Collapse the undef state to 0. 6887 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 6888 indicateOptimisticFixpoint(); 6889 return; 6890 } 6891 6892 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 6893 return; 6894 // If it is a load instruction with range metadata, use it. 6895 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 6896 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 6897 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 6898 return; 6899 } 6900 6901 // We can work with PHI and select instruction as we traverse their operands 6902 // during update. 6903 if (isa<SelectInst>(V) || isa<PHINode>(V)) 6904 return; 6905 6906 // Otherwise we give up. 6907 indicatePessimisticFixpoint(); 6908 6909 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 6910 << getAssociatedValue() << "\n"); 6911 } 6912 6913 bool calculateBinaryOperator( 6914 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 6915 const Instruction *CtxI, 6916 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6917 Value *LHS = BinOp->getOperand(0); 6918 Value *RHS = BinOp->getOperand(1); 6919 // TODO: Allow non integers as well. 6920 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 6921 return false; 6922 6923 auto &LHSAA = 6924 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 6925 QuerriedAAs.push_back(&LHSAA); 6926 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 6927 6928 auto &RHSAA = 6929 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 6930 QuerriedAAs.push_back(&RHSAA); 6931 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 6932 6933 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 6934 6935 T.unionAssumed(AssumedRange); 6936 6937 // TODO: Track a known state too. 6938 6939 return T.isValidState(); 6940 } 6941 6942 bool calculateCastInst( 6943 Attributor &A, CastInst *CastI, IntegerRangeState &T, 6944 const Instruction *CtxI, 6945 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6946 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 6947 // TODO: Allow non integers as well. 6948 Value &OpV = *CastI->getOperand(0); 6949 if (!OpV.getType()->isIntegerTy()) 6950 return false; 6951 6952 auto &OpAA = 6953 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV)); 6954 QuerriedAAs.push_back(&OpAA); 6955 T.unionAssumed( 6956 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 6957 return T.isValidState(); 6958 } 6959 6960 bool 6961 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 6962 const Instruction *CtxI, 6963 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 6964 Value *LHS = CmpI->getOperand(0); 6965 Value *RHS = CmpI->getOperand(1); 6966 // TODO: Allow non integers as well. 
6967 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 6968 return false; 6969 6970 auto &LHSAA = 6971 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 6972 QuerriedAAs.push_back(&LHSAA); 6973 auto &RHSAA = 6974 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 6975 QuerriedAAs.push_back(&RHSAA); 6976 6977 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 6978 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 6979 6980 // If one of them is an empty set, we can't decide. 6981 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 6982 return true; 6983 6984 bool MustTrue = false, MustFalse = false; 6985 6986 auto AllowedRegion = 6987 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 6988 6989 auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion( 6990 CmpI->getPredicate(), RHSAARange); 6991 6992 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 6993 MustFalse = true; 6994 6995 if (SatisfyingRegion.contains(LHSAARange)) 6996 MustTrue = true; 6997 6998 assert((!MustTrue || !MustFalse) && 6999 "Either MustTrue or MustFalse should be false!"); 7000 7001 if (MustTrue) 7002 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 7003 else if (MustFalse) 7004 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 7005 else 7006 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 7007 7008 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 7009 << " " << RHSAA << "\n"); 7010 7011 // TODO: Track a known state too. 7012 return T.isValidState(); 7013 } 7014 7015 /// See AbstractAttribute::updateImpl(...). 7016 ChangeStatus updateImpl(Attributor &A) override { 7017 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 7018 IntegerRangeState &T, bool Stripped) -> bool { 7019 Instruction *I = dyn_cast<Instruction>(&V); 7020 if (!I || isa<CallBase>(I)) { 7021 7022 // If the value is not an instruction (or is a call site), we query the Attributor for a range AA. 7023 const auto &AA = 7024 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V)); 7025 7026 // The clamp operator is not used so that the program point CtxI can be utilized. 7027 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI)); 7028 7029 return T.isValidState(); 7030 } 7031 7032 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs; 7033 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) { 7034 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs)) 7035 return false; 7036 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) { 7037 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs)) 7038 return false; 7039 } else if (auto *CastI = dyn_cast<CastInst>(I)) { 7040 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs)) 7041 return false; 7042 } else { 7043 // Give up on other instructions. 7044 // TODO: Add other instructions. 7045 7046 T.indicatePessimisticFixpoint(); 7047 return false; 7048 } 7049 7050 // Catch circular reasoning in a pessimistic way for now. 7051 // TODO: Check how the range evolves and if we stripped anything, see also 7052 // AADereferenceable or AAAlign for similar situations. 7053 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) { 7054 if (QueriedAA != this) 7055 continue; 7056 // If we are in a steady state we do not need to worry.
7057 if (T.getAssumed() == getState().getAssumed()) 7058 continue; 7059 T.indicatePessimisticFixpoint(); 7060 } 7061 7062 return T.isValidState(); 7063 }; 7064 7065 IntegerRangeState T(getBitWidth()); 7066 7067 if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>( 7068 A, getIRPosition(), *this, T, VisitValueCB, getCtxI(), 7069 /* UseValueSimplify */ false)) 7070 return indicatePessimisticFixpoint(); 7071 7072 return clampStateAndIndicateChange(getState(), T); 7073 } 7074 7075 /// See AbstractAttribute::trackStatistics() 7076 void trackStatistics() const override { 7077 STATS_DECLTRACK_FLOATING_ATTR(value_range) 7078 } 7079 }; 7080 7081 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 7082 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 7083 : AAValueConstantRangeImpl(IRP, A) {} 7084 7085 /// See AbstractAttribute::updateImpl(...). 7086 ChangeStatus updateImpl(Attributor &A) override { 7087 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 7088 "not be called"); 7089 } 7090 7091 /// See AbstractAttribute::trackStatistics() 7092 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 7093 }; 7094 7095 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 7096 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 7097 : AAValueConstantRangeFunction(IRP, A) {} 7098 7099 /// See AbstractAttribute::trackStatistics() 7100 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 7101 }; 7102 7103 struct AAValueConstantRangeCallSiteReturned 7104 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7105 AAValueConstantRangeImpl> { 7106 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 7107 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7108 AAValueConstantRangeImpl>(IRP, A) {} 7109 7110 /// See AbstractAttribute::initialize(...). 7111 void initialize(Attributor &A) override { 7112 // If it is a call instruction with range metadata, use the metadata.
7113 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 7114 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 7115 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 7116 7117 AAValueConstantRangeImpl::initialize(A); 7118 } 7119 7120 /// See AbstractAttribute::trackStatistics() 7121 void trackStatistics() const override { 7122 STATS_DECLTRACK_CSRET_ATTR(value_range) 7123 } 7124 }; 7125 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 7126 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 7127 : AAValueConstantRangeFloating(IRP, A) {} 7128 7129 /// See AbstractAttribute::trackStatistics() 7130 void trackStatistics() const override { 7131 STATS_DECLTRACK_CSARG_ATTR(value_range) 7132 } 7133 }; 7134 } // namespace 7135 7136 const char AAReturnedValues::ID = 0; 7137 const char AANoUnwind::ID = 0; 7138 const char AANoSync::ID = 0; 7139 const char AANoFree::ID = 0; 7140 const char AANonNull::ID = 0; 7141 const char AANoRecurse::ID = 0; 7142 const char AAWillReturn::ID = 0; 7143 const char AAUndefinedBehavior::ID = 0; 7144 const char AANoAlias::ID = 0; 7145 const char AAReachability::ID = 0; 7146 const char AANoReturn::ID = 0; 7147 const char AAIsDead::ID = 0; 7148 const char AADereferenceable::ID = 0; 7149 const char AAAlign::ID = 0; 7150 const char AANoCapture::ID = 0; 7151 const char AAValueSimplify::ID = 0; 7152 const char AAHeapToStack::ID = 0; 7153 const char AAPrivatizablePtr::ID = 0; 7154 const char AAMemoryBehavior::ID = 0; 7155 const char AAMemoryLocation::ID = 0; 7156 const char AAValueConstantRange::ID = 0; 7157 7158 // Macro magic to create the static generator function for attributes that 7159 // follow the naming scheme. 7160 7161 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ 7162 case IRPosition::PK: \ 7163 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); 7164 7165 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ 7166 case IRPosition::PK: \ 7167 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \ 7168 ++NumAAs; \ 7169 break; 7170 7171 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7172 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7173 CLASS *AA = nullptr; \ 7174 switch (IRP.getPositionKind()) { \ 7175 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7176 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7177 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7178 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7179 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7180 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7181 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7182 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7183 } \ 7184 return *AA; \ 7185 } 7186 7187 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7188 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7189 CLASS *AA = nullptr; \ 7190 switch (IRP.getPositionKind()) { \ 7191 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7192 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ 7193 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7194 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7195 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7196 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7197 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7198 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7199 } \ 7200 return *AA; \ 7201 
} 7202 7203 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7204 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7205 CLASS *AA = nullptr; \ 7206 switch (IRP.getPositionKind()) { \ 7207 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7208 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7209 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7210 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7211 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7212 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 7213 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7214 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7215 } \ 7216 return *AA; \ 7217 } 7218 7219 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7220 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7221 CLASS *AA = nullptr; \ 7222 switch (IRP.getPositionKind()) { \ 7223 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7224 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 7225 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 7226 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7227 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 7228 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 7229 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 7230 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7231 } \ 7232 return *AA; \ 7233 } 7234 7235 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 7236 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 7237 CLASS *AA = nullptr; \ 7238 switch (IRP.getPositionKind()) { \ 7239 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 7240 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 7241 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 7242 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 7243 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 7244 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 7245 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 7246 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 7247 } \ 7248 return *AA; \ 7249 } 7250 7251 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) 7252 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) 7253 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) 7254 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) 7255 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) 7256 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues) 7257 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation) 7258 7259 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) 7260 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) 7261 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr) 7262 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) 7263 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) 7264 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture) 7265 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange) 7266 7267 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) 7268 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) 7269 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) 7270 7271 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack) 7272 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability) 7273 
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior) 7274 7275 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior) 7276 7277 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION 7278 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION 7279 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION 7280 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION 7281 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION 7282 #undef SWITCH_PK_CREATE 7283 #undef SWITCH_PK_INV 7284
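// Usage sketch (hypothetical, not part of this file): clients reach the
// createForPosition functions generated above indirectly, by asking the
// Attributor for an attribute at a position, e.g.,
//   const auto &MemLocAA =
//       A.getAAFor<AAMemoryLocation>(QueryingAA, IRPosition::function(F));
// The first such query for a position allocates the matching *Function,
// *CallSite, *Argument, ... subclass via the macros above (`QueryingAA` and
// `F` are placeholder names).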