//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
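//
// For illustration only (the attribute name below is hypothetical), the
// separate form could look like:
//   STATS_DECL(nonnull, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, nonnull))
//   ...
//   STATS_TRACK(nonnull, Arguments)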
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
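/// For illustration only (hypothetical IR, not taken from a test): for
///   store volatile i32 0, i32* %p
/// this returns nullptr unless \p AllowVolatile is true, in which case it
/// returns %p.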
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
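    // For illustration only (hypothetical IR): for
    //   %p = phi i8* [ %a, %live.bb ], [ %b, %dead.bb ]
    // the incoming value %b is skipped if the terminator of %dead.bb is
    // assumed dead; only {%a, terminator of %live.bb} is pushed on the
    // worklist.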
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         /* TrackDependence */ UseAssumed);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in: an update
/// is required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into its own state. Let ParentState_i be a
  // state to indicate the known information for an i-th branch instruction in
  // the context. ChildStates are created for its successors respectively.
  //
  //    ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  //    ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //    ...
  //    ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  //    Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  //    void f(int a, int c, int *ptr) {
  //      if (a)
  //        if (c) {
  //          *ptr = 0;
  //        } else {
  //          *ptr = 1;
  //        }
  //      else {
  //        if (c) {
  //          *ptr = 0;
  //        } else {
  //          *ptr = 1;
  //        }
  //      }
  //    }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
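///
/// For illustration only (hypothetical IR): in
///   define i32* @id(i32* %p) { ret i32* %p }
/// the unique returned value is the argument %p, so manifesting would mark it
/// as `i32* returned %p`.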
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
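  ///
  /// For illustration: a fixed, valid state with three returned values and no
  /// unresolved calls prints as "returns(#3)[#UC: 0]".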
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of function with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of function with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV, SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV
                      << " by #" << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible, thus
    // if all return values can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is volatile (memcpy,
  /// memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
1567 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 1568 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 1569 : AANoFreeFloating(IRP, A) {} 1570 1571 ChangeStatus manifest(Attributor &A) override { 1572 return ChangeStatus::UNCHANGED; 1573 } 1574 /// See AbstractAttribute::trackStatistics() 1575 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 1576 }; 1577 1578 /// ------------------------ NonNull Argument Attribute ------------------------ 1579 static int64_t getKnownNonNullAndDerefBytesForUse( 1580 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 1581 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 1582 TrackUse = false; 1583 1584 const Value *UseV = U->get(); 1585 if (!UseV->getType()->isPointerTy()) 1586 return 0; 1587 1588 Type *PtrTy = UseV->getType(); 1589 const Function *F = I->getFunction(); 1590 bool NullPointerIsDefined = 1591 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 1592 const DataLayout &DL = A.getInfoCache().getDL(); 1593 if (const auto *CB = dyn_cast<CallBase>(I)) { 1594 if (CB->isBundleOperand(U)) { 1595 if (RetainedKnowledge RK = getKnowledgeFromUse( 1596 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 1597 IsNonNull |= 1598 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 1599 return RK.ArgValue; 1600 } 1601 return 0; 1602 } 1603 1604 if (CB->isCallee(U)) { 1605 IsNonNull |= !NullPointerIsDefined; 1606 return 0; 1607 } 1608 1609 unsigned ArgNo = CB->getArgOperandNo(U); 1610 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 1611 // As long as we only use known information there is no need to track 1612 // dependences here. 1613 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, 1614 /* TrackDependence */ false); 1615 IsNonNull |= DerefAA.isKnownNonNull(); 1616 return DerefAA.getKnownDereferenceableBytes(); 1617 } 1618 1619 // We need to follow common pointer manipulation uses to the accesses they 1620 // feed into. We can try to be smart to avoid looking through things we do not 1621 // like for now, e.g., non-inbounds GEPs. 1622 if (isa<CastInst>(I)) { 1623 TrackUse = true; 1624 return 0; 1625 } 1626 1627 if (isa<GetElementPtrInst>(I)) { 1628 TrackUse = true; 1629 return 0; 1630 } 1631 1632 int64_t Offset; 1633 const Value *Base = 1634 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); 1635 if (Base) { 1636 if (Base == &AssociatedValue && 1637 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1638 int64_t DerefBytes = 1639 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; 1640 1641 IsNonNull |= !NullPointerIsDefined; 1642 return std::max(int64_t(0), DerefBytes); 1643 } 1644 } 1645 1646 /// Corner case when an offset is 0. 
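/// E.g., the access happens directly through \p UseV, or through GEPs
/// (possibly non-inbounds) whose constant offsets fold to zero; in that case
/// at least the store size of the pointee type is known to be dereferenced.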
1647 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, 1648 /*AllowNonInbounds*/ true); 1649 if (Base) { 1650 if (Offset == 0 && Base == &AssociatedValue && 1651 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1652 int64_t DerefBytes = 1653 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); 1654 IsNonNull |= !NullPointerIsDefined; 1655 return std::max(int64_t(0), DerefBytes); 1656 } 1657 } 1658 1659 return 0; 1660 } 1661 1662 struct AANonNullImpl : AANonNull { 1663 AANonNullImpl(const IRPosition &IRP, Attributor &A) 1664 : AANonNull(IRP, A), 1665 NullIsDefined(NullPointerIsDefined( 1666 getAnchorScope(), 1667 getAssociatedValue().getType()->getPointerAddressSpace())) {} 1668 1669 /// See AbstractAttribute::initialize(...). 1670 void initialize(Attributor &A) override { 1671 Value &V = getAssociatedValue(); 1672 if (!NullIsDefined && 1673 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 1674 /* IgnoreSubsumingPositions */ false, &A)) 1675 indicateOptimisticFixpoint(); 1676 else if (isa<ConstantPointerNull>(V)) 1677 indicatePessimisticFixpoint(); 1678 else 1679 AANonNull::initialize(A); 1680 1681 bool CanBeNull = true; 1682 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) 1683 if (!CanBeNull) 1684 indicateOptimisticFixpoint(); 1685 1686 if (!getState().isAtFixpoint()) 1687 if (Instruction *CtxI = getCtxI()) 1688 followUsesInMBEC(*this, A, getState(), *CtxI); 1689 } 1690 1691 /// See followUsesInMBEC 1692 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 1693 AANonNull::StateType &State) { 1694 bool IsNonNull = false; 1695 bool TrackUse = false; 1696 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 1697 IsNonNull, TrackUse); 1698 State.setKnown(IsNonNull); 1699 return TrackUse; 1700 } 1701 1702 /// See AbstractAttribute::getAsStr(). 1703 const std::string getAsStr() const override { 1704 return getAssumed() ? "nonnull" : "may-null"; 1705 } 1706 1707 /// Flag to determine if the underlying value can be null and still allow 1708 /// valid accesses. 1709 const bool NullIsDefined; 1710 }; 1711 1712 /// NonNull attribute for a floating value. 1713 struct AANonNullFloating : public AANonNullImpl { 1714 AANonNullFloating(const IRPosition &IRP, Attributor &A) 1715 : AANonNullImpl(IRP, A) {} 1716 1717 /// See AbstractAttribute::updateImpl(...). 1718 ChangeStatus updateImpl(Attributor &A) override { 1719 if (!NullIsDefined) { 1720 const auto &DerefAA = 1721 A.getAAFor<AADereferenceable>(*this, getIRPosition()); 1722 if (DerefAA.getAssumedDereferenceableBytes()) 1723 return ChangeStatus::UNCHANGED; 1724 } 1725 1726 const DataLayout &DL = A.getDataLayout(); 1727 1728 DominatorTree *DT = nullptr; 1729 AssumptionCache *AC = nullptr; 1730 InformationCache &InfoCache = A.getInfoCache(); 1731 if (const Function *Fn = getAnchorScope()) { 1732 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 1733 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 1734 } 1735 1736 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 1737 AANonNull::StateType &T, bool Stripped) -> bool { 1738 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); 1739 if (!Stripped && this == &AA) { 1740 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 1741 T.indicatePessimisticFixpoint(); 1742 } else { 1743 // Use abstract attribute information. 
1744 const AANonNull::StateType &NS = 1745 static_cast<const AANonNull::StateType &>(AA.getState()); 1746 T ^= NS; 1747 } 1748 return T.isValidState(); 1749 }; 1750 1751 StateType T; 1752 if (!genericValueTraversal<AANonNull, StateType>( 1753 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 1754 return indicatePessimisticFixpoint(); 1755 1756 return clampStateAndIndicateChange(getState(), T); 1757 } 1758 1759 /// See AbstractAttribute::trackStatistics() 1760 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1761 }; 1762 1763 /// NonNull attribute for function return value. 1764 struct AANonNullReturned final 1765 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> { 1766 AANonNullReturned(const IRPosition &IRP, Attributor &A) 1767 : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {} 1768 1769 /// See AbstractAttribute::trackStatistics() 1770 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1771 }; 1772 1773 /// NonNull attribute for function argument. 1774 struct AANonNullArgument final 1775 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 1776 AANonNullArgument(const IRPosition &IRP, Attributor &A) 1777 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 1778 1779 /// See AbstractAttribute::trackStatistics() 1780 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 1781 }; 1782 1783 struct AANonNullCallSiteArgument final : AANonNullFloating { 1784 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 1785 : AANonNullFloating(IRP, A) {} 1786 1787 /// See AbstractAttribute::trackStatistics() 1788 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 1789 }; 1790 1791 /// NonNull attribute for a call site return position. 1792 struct AANonNullCallSiteReturned final 1793 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 1794 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 1795 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 1796 1797 /// See AbstractAttribute::trackStatistics() 1798 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 1799 }; 1800 1801 /// ------------------------ No-Recurse Attributes ---------------------------- 1802 1803 struct AANoRecurseImpl : public AANoRecurse { 1804 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 1805 1806 /// See AbstractAttribute::getAsStr() 1807 const std::string getAsStr() const override { 1808 return getAssumed() ? "norecurse" : "may-recurse"; 1809 } 1810 }; 1811 1812 struct AANoRecurseFunction final : AANoRecurseImpl { 1813 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 1814 : AANoRecurseImpl(IRP, A) {} 1815 1816 /// See AbstractAttribute::initialize(...). 1817 void initialize(Attributor &A) override { 1818 AANoRecurseImpl::initialize(A); 1819 if (const Function *F = getAnchorScope()) 1820 if (A.getInfoCache().getSccSize(*F) != 1) 1821 indicatePessimisticFixpoint(); 1822 } 1823 1824 /// See AbstractAttribute::updateImpl(...). 1825 ChangeStatus updateImpl(Attributor &A) override { 1826 1827 // If all live call sites are known to be no-recurse, we are as well. 
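// Note that only *known* norecurse results are used here; since no dependence
// is tracked, merely assumed information would not be picked up by later
// updates.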
1828 auto CallSitePred = [&](AbstractCallSite ACS) {
1829 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1830 *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1831 /* TrackDependence */ false, DepClassTy::OPTIONAL);
1832 return NoRecurseAA.isKnownNoRecurse();
1833 };
1834 bool AllCallSitesKnown;
1835 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1836 // If we know all call sites and all are known no-recurse, we are done.
1837 // If all known call sites, which might not be all that exist, are known
1838 // to be no-recurse, we are not done but we can continue to assume
1839 // no-recurse. If one of the call sites we have not visited will become
1840 // live, another update is triggered.
1841 if (AllCallSitesKnown)
1842 indicateOptimisticFixpoint();
1843 return ChangeStatus::UNCHANGED;
1844 }
1845
1846 // If the above check does not hold anymore we look at the calls.
1847 auto CheckForNoRecurse = [&](Instruction &I) {
1848 const auto &CB = cast<CallBase>(I);
1849 if (CB.hasFnAttr(Attribute::NoRecurse))
1850 return true;
1851
1852 const auto &NoRecurseAA =
1853 A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1854 if (!NoRecurseAA.isAssumedNoRecurse())
1855 return false;
1856
1857 // Recursion to the same function
1858 if (CB.getCalledFunction() == getAnchorScope())
1859 return false;
1860
1861 return true;
1862 };
1863
1864 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1865 return indicatePessimisticFixpoint();
1866 return ChangeStatus::UNCHANGED;
1867 }
1868
1869 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1870 };
1871
1872 /// NoRecurse attribute deduction for a call site.
1873 struct AANoRecurseCallSite final : AANoRecurseImpl {
1874 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1875 : AANoRecurseImpl(IRP, A) {}
1876
1877 /// See AbstractAttribute::initialize(...).
1878 void initialize(Attributor &A) override {
1879 AANoRecurseImpl::initialize(A);
1880 Function *F = getAssociatedFunction();
1881 if (!F)
1882 indicatePessimisticFixpoint();
1883 }
1884
1885 /// See AbstractAttribute::updateImpl(...).
1886 ChangeStatus updateImpl(Attributor &A) override {
1887 // TODO: Once we have call site specific value information we can provide
1888 // call site specific liveness information and then it makes
1889 // sense to specialize attributes for call sites arguments instead of
1890 // redirecting requests to the callee argument.
1891 Function *F = getAssociatedFunction();
1892 const IRPosition &FnPos = IRPosition::function(*F);
1893 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1894 return clampStateAndIndicateChange(
1895 getState(),
1896 static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1897 }
1898
1899 /// See AbstractAttribute::trackStatistics()
1900 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1901 };
1902
1903 /// -------------------- Undefined-Behavior Attributes ------------------------
1904
1905 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1906 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1907 : AAUndefinedBehavior(IRP, A) {}
1908
1909 /// See AbstractAttribute::updateImpl(...).
1910 // We check memory accesses through a pointer and conditional branches for UB.
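// For example (illustrative IR), assuming null is not a defined pointer in
// the respective address space:
//
//   store i32 0, i32* null            ; access through constant null: known UB
//   br i1 undef, label %a, label %b   ; branch on undef condition: known UB
//
// Accesses through other pointers (or through null where it is defined) are
// remembered in AssumedNoUBInsts instead.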
1911 ChangeStatus updateImpl(Attributor &A) override { 1912 const size_t UBPrevSize = KnownUBInsts.size(); 1913 const size_t NoUBPrevSize = AssumedNoUBInsts.size(); 1914 1915 auto InspectMemAccessInstForUB = [&](Instruction &I) { 1916 // Skip instructions that are already saved. 1917 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1918 return true; 1919 1920 // If we reach here, we know we have an instruction 1921 // that accesses memory through a pointer operand, 1922 // for which getPointerOperand() should give it to us. 1923 const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); 1924 assert(PtrOp && 1925 "Expected pointer operand of memory accessing instruction"); 1926 1927 // Either we stopped and the appropriate action was taken, 1928 // or we got back a simplified value to continue. 1929 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); 1930 if (!SimplifiedPtrOp.hasValue()) 1931 return true; 1932 const Value *PtrOpVal = SimplifiedPtrOp.getValue(); 1933 1934 // A memory access through a pointer is considered UB 1935 // only if the pointer has constant null value. 1936 // TODO: Expand it to not only check constant values. 1937 if (!isa<ConstantPointerNull>(PtrOpVal)) { 1938 AssumedNoUBInsts.insert(&I); 1939 return true; 1940 } 1941 const Type *PtrTy = PtrOpVal->getType(); 1942 1943 // Because we only consider instructions inside functions, 1944 // assume that a parent function exists. 1945 const Function *F = I.getFunction(); 1946 1947 // A memory access using constant null pointer is only considered UB 1948 // if null pointer is _not_ defined for the target platform. 1949 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) 1950 AssumedNoUBInsts.insert(&I); 1951 else 1952 KnownUBInsts.insert(&I); 1953 return true; 1954 }; 1955 1956 auto InspectBrInstForUB = [&](Instruction &I) { 1957 // A conditional branch instruction is considered UB if it has `undef` 1958 // condition. 1959 1960 // Skip instructions that are already saved. 1961 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1962 return true; 1963 1964 // We know we have a branch instruction. 1965 auto BrInst = cast<BranchInst>(&I); 1966 1967 // Unconditional branches are never considered UB. 1968 if (BrInst->isUnconditional()) 1969 return true; 1970 1971 // Either we stopped and the appropriate action was taken, 1972 // or we got back a simplified value to continue. 1973 Optional<Value *> SimplifiedCond = 1974 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); 1975 if (!SimplifiedCond.hasValue()) 1976 return true; 1977 AssumedNoUBInsts.insert(&I); 1978 return true; 1979 }; 1980 1981 A.checkForAllInstructions(InspectMemAccessInstForUB, *this, 1982 {Instruction::Load, Instruction::Store, 1983 Instruction::AtomicCmpXchg, 1984 Instruction::AtomicRMW}, 1985 /* CheckBBLivenessOnly */ true); 1986 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br}, 1987 /* CheckBBLivenessOnly */ true); 1988 if (NoUBPrevSize != AssumedNoUBInsts.size() || 1989 UBPrevSize != KnownUBInsts.size()) 1990 return ChangeStatus::CHANGED; 1991 return ChangeStatus::UNCHANGED; 1992 } 1993 1994 bool isKnownToCauseUB(Instruction *I) const override { 1995 return KnownUBInsts.count(I); 1996 } 1997 1998 bool isAssumedToCauseUB(Instruction *I) const override { 1999 // In simple words, if an instruction is not in the assumed to _not_ 2000 // cause UB, then it is assumed UB (that includes those 2001 // in the KnownUBInsts set). 
The rest is boilerplate
2002 // to ensure that it is one of the instructions we test
2003 // for UB.
2004
2005 switch (I->getOpcode()) {
2006 case Instruction::Load:
2007 case Instruction::Store:
2008 case Instruction::AtomicCmpXchg:
2009 case Instruction::AtomicRMW:
2010 return !AssumedNoUBInsts.count(I);
2011 case Instruction::Br: {
2012 auto BrInst = cast<BranchInst>(I);
2013 if (BrInst->isUnconditional())
2014 return false;
2015 return !AssumedNoUBInsts.count(I);
2016 } break;
2017 default:
2018 return false;
2019 }
2020 return false;
2021 }
2022
2023 ChangeStatus manifest(Attributor &A) override {
2024 if (KnownUBInsts.empty())
2025 return ChangeStatus::UNCHANGED;
2026 for (Instruction *I : KnownUBInsts)
2027 A.changeToUnreachableAfterManifest(I);
2028 return ChangeStatus::CHANGED;
2029 }
2030
2031 /// See AbstractAttribute::getAsStr()
2032 const std::string getAsStr() const override {
2033 return getAssumed() ? "undefined-behavior" : "no-ub";
2034 }
2035
2036 /// Note: The correctness of this analysis depends on the fact that the
2037 /// following 2 sets will stop changing after some point.
2038 /// "Change" here means that their size changes.
2039 /// The size of each set is monotonically increasing
2040 /// (we only add items to them) and it is upper bounded by the number of
2041 /// instructions in the processed function (we can never save more
2042 /// elements in either set than this number). Hence, at some point,
2043 /// they will stop increasing.
2044 /// Consequently, at some point, both sets will have stopped
2045 /// changing, effectively making the analysis reach a fixpoint.
2046
2047 /// Note: These 2 sets are disjoint and an instruction can be considered
2048 /// one of 3 things:
2049 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2050 /// the KnownUBInsts set.
2051 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2052 /// has a reason to assume it).
2053 /// 3) Assumed to not cause UB: every other instruction - AAUndefinedBehavior
2054 /// could not find a reason to assume or prove that it can cause UB,
2055 /// hence it assumes it doesn't. We have a set for these instructions
2056 /// so that we don't reprocess them in every update.
2057 /// Note however that instructions in this set may cause UB.
2058
2059 protected:
2060 /// A set of all live instructions _known_ to cause UB.
2061 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2062
2063 private:
2064 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2065 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2066
2067 // Should be called on updates in which, if we're processing an instruction
2068 // \p I that depends on a value \p V, one of the following has to happen:
2069 // - If the value is assumed, then stop.
2070 // - If the value is known but undef, then consider it UB.
2071 // - Otherwise, do specific processing with the simplified value.
2072 // We return None in the first 2 cases to signify that an appropriate
2073 // action was taken and the caller should stop.
2074 // Otherwise, we return the simplified value that the caller should
2075 // use for specific processing.
2076 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2077 Instruction *I) {
2078 const auto &ValueSimplifyAA =
2079 A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2080 Optional<Value *> SimplifiedV =
2081 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2082 if (!ValueSimplifyAA.isKnown()) {
2083 // Don't depend on assumed values.
2084 return llvm::None; 2085 } 2086 if (!SimplifiedV.hasValue()) { 2087 // If it is known (which we tested above) but it doesn't have a value, 2088 // then we can assume `undef` and hence the instruction is UB. 2089 KnownUBInsts.insert(I); 2090 return llvm::None; 2091 } 2092 Value *Val = SimplifiedV.getValue(); 2093 if (isa<UndefValue>(Val)) { 2094 KnownUBInsts.insert(I); 2095 return llvm::None; 2096 } 2097 return Val; 2098 } 2099 }; 2100 2101 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { 2102 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) 2103 : AAUndefinedBehaviorImpl(IRP, A) {} 2104 2105 /// See AbstractAttribute::trackStatistics() 2106 void trackStatistics() const override { 2107 STATS_DECL(UndefinedBehaviorInstruction, Instruction, 2108 "Number of instructions known to have UB"); 2109 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += 2110 KnownUBInsts.size(); 2111 } 2112 }; 2113 2114 /// ------------------------ Will-Return Attributes ---------------------------- 2115 2116 // Helper function that checks whether a function has any cycle which we don't 2117 // know if it is bounded or not. 2118 // Loops with maximum trip count are considered bounded, any other cycle not. 2119 static bool mayContainUnboundedCycle(Function &F, Attributor &A) { 2120 ScalarEvolution *SE = 2121 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); 2122 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); 2123 // If either SCEV or LoopInfo is not available for the function then we assume 2124 // any cycle to be unbounded cycle. 2125 // We use scc_iterator which uses Tarjan algorithm to find all the maximal 2126 // SCCs.To detect if there's a cycle, we only need to find the maximal ones. 2127 if (!SE || !LI) { 2128 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) 2129 if (SCCI.hasCycle()) 2130 return true; 2131 return false; 2132 } 2133 2134 // If there's irreducible control, the function may contain non-loop cycles. 2135 if (mayContainIrreducibleControl(F, LI)) 2136 return true; 2137 2138 // Any loop that does not have a max trip count is considered unbounded cycle. 2139 for (auto *L : LI->getLoopsInPreorder()) { 2140 if (!SE->getSmallConstantMaxTripCount(L)) 2141 return true; 2142 } 2143 return false; 2144 } 2145 2146 struct AAWillReturnImpl : public AAWillReturn { 2147 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2148 : AAWillReturn(IRP, A) {} 2149 2150 /// See AbstractAttribute::initialize(...). 2151 void initialize(Attributor &A) override { 2152 AAWillReturn::initialize(A); 2153 2154 Function *F = getAnchorScope(); 2155 if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A)) 2156 indicatePessimisticFixpoint(); 2157 } 2158 2159 /// See AbstractAttribute::updateImpl(...). 
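/// A call-like instruction does not block willreturn if the callee is known
/// to return; if it is merely assumed to return we additionally require it to
/// be assumed norecurse, since unbounded recursion is a way of not returning
/// that the CFG-based cycle check in initialize(...) cannot see.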
2160 ChangeStatus updateImpl(Attributor &A) override { 2161 auto CheckForWillReturn = [&](Instruction &I) { 2162 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2163 const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos); 2164 if (WillReturnAA.isKnownWillReturn()) 2165 return true; 2166 if (!WillReturnAA.isAssumedWillReturn()) 2167 return false; 2168 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos); 2169 return NoRecurseAA.isAssumedNoRecurse(); 2170 }; 2171 2172 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) 2173 return indicatePessimisticFixpoint(); 2174 2175 return ChangeStatus::UNCHANGED; 2176 } 2177 2178 /// See AbstractAttribute::getAsStr() 2179 const std::string getAsStr() const override { 2180 return getAssumed() ? "willreturn" : "may-noreturn"; 2181 } 2182 }; 2183 2184 struct AAWillReturnFunction final : AAWillReturnImpl { 2185 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2186 : AAWillReturnImpl(IRP, A) {} 2187 2188 /// See AbstractAttribute::trackStatistics() 2189 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2190 }; 2191 2192 /// WillReturn attribute deduction for a call sites. 2193 struct AAWillReturnCallSite final : AAWillReturnImpl { 2194 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2195 : AAWillReturnImpl(IRP, A) {} 2196 2197 /// See AbstractAttribute::initialize(...). 2198 void initialize(Attributor &A) override { 2199 AAWillReturnImpl::initialize(A); 2200 Function *F = getAssociatedFunction(); 2201 if (!F) 2202 indicatePessimisticFixpoint(); 2203 } 2204 2205 /// See AbstractAttribute::updateImpl(...). 2206 ChangeStatus updateImpl(Attributor &A) override { 2207 // TODO: Once we have call site specific value information we can provide 2208 // call site specific liveness information and then it makes 2209 // sense to specialize attributes for call sites arguments instead of 2210 // redirecting requests to the callee argument. 2211 Function *F = getAssociatedFunction(); 2212 const IRPosition &FnPos = IRPosition::function(*F); 2213 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); 2214 return clampStateAndIndicateChange( 2215 getState(), 2216 static_cast<const AAWillReturn::StateType &>(FnAA.getState())); 2217 } 2218 2219 /// See AbstractAttribute::trackStatistics() 2220 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2221 }; 2222 2223 /// -------------------AAReachability Attribute-------------------------- 2224 2225 struct AAReachabilityImpl : AAReachability { 2226 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2227 : AAReachability(IRP, A) {} 2228 2229 const std::string getAsStr() const override { 2230 // TODO: Return the number of reachable queries. 2231 return "reachable"; 2232 } 2233 2234 /// See AbstractAttribute::initialize(...). 2235 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } 2236 2237 /// See AbstractAttribute::updateImpl(...). 
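/// Currently a stub: the state is already fixed in initialize(...), so this
/// merely confirms the pessimistic fixpoint.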
2238 ChangeStatus updateImpl(Attributor &A) override { 2239 return indicatePessimisticFixpoint(); 2240 } 2241 }; 2242 2243 struct AAReachabilityFunction final : public AAReachabilityImpl { 2244 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2245 : AAReachabilityImpl(IRP, A) {} 2246 2247 /// See AbstractAttribute::trackStatistics() 2248 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2249 }; 2250 2251 /// ------------------------ NoAlias Argument Attribute ------------------------ 2252 2253 struct AANoAliasImpl : AANoAlias { 2254 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2255 assert(getAssociatedType()->isPointerTy() && 2256 "Noalias is a pointer attribute"); 2257 } 2258 2259 const std::string getAsStr() const override { 2260 return getAssumed() ? "noalias" : "may-alias"; 2261 } 2262 }; 2263 2264 /// NoAlias attribute for a floating value. 2265 struct AANoAliasFloating final : AANoAliasImpl { 2266 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 2267 : AANoAliasImpl(IRP, A) {} 2268 2269 /// See AbstractAttribute::initialize(...). 2270 void initialize(Attributor &A) override { 2271 AANoAliasImpl::initialize(A); 2272 Value *Val = &getAssociatedValue(); 2273 do { 2274 CastInst *CI = dyn_cast<CastInst>(Val); 2275 if (!CI) 2276 break; 2277 Value *Base = CI->getOperand(0); 2278 if (!Base->hasOneUse()) 2279 break; 2280 Val = Base; 2281 } while (true); 2282 2283 if (!Val->getType()->isPointerTy()) { 2284 indicatePessimisticFixpoint(); 2285 return; 2286 } 2287 2288 if (isa<AllocaInst>(Val)) 2289 indicateOptimisticFixpoint(); 2290 else if (isa<ConstantPointerNull>(Val) && 2291 !NullPointerIsDefined(getAnchorScope(), 2292 Val->getType()->getPointerAddressSpace())) 2293 indicateOptimisticFixpoint(); 2294 else if (Val != &getAssociatedValue()) { 2295 const auto &ValNoAliasAA = 2296 A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); 2297 if (ValNoAliasAA.isKnownNoAlias()) 2298 indicateOptimisticFixpoint(); 2299 } 2300 } 2301 2302 /// See AbstractAttribute::updateImpl(...). 2303 ChangeStatus updateImpl(Attributor &A) override { 2304 // TODO: Implement this. 2305 return indicatePessimisticFixpoint(); 2306 } 2307 2308 /// See AbstractAttribute::trackStatistics() 2309 void trackStatistics() const override { 2310 STATS_DECLTRACK_FLOATING_ATTR(noalias) 2311 } 2312 }; 2313 2314 /// NoAlias attribute for an argument. 2315 struct AANoAliasArgument final 2316 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 2317 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 2318 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 2319 2320 /// See AbstractAttribute::initialize(...). 2321 void initialize(Attributor &A) override { 2322 Base::initialize(A); 2323 // See callsite argument attribute and callee argument attribute. 2324 if (hasAttr({Attribute::ByVal})) 2325 indicateOptimisticFixpoint(); 2326 } 2327 2328 /// See AbstractAttribute::update(...). 2329 ChangeStatus updateImpl(Attributor &A) override { 2330 // We have to make sure no-alias on the argument does not break 2331 // synchronization when this is a callback argument, see also [1] below. 2332 // If synchronization cannot be affected, we delegate to the base updateImpl 2333 // function, otherwise we give up for now. 2334 2335 // If the function is no-sync, no-alias cannot break synchronization. 
2336 const auto &NoSyncAA = A.getAAFor<AANoSync>( 2337 *this, IRPosition::function_scope(getIRPosition())); 2338 if (NoSyncAA.isAssumedNoSync()) 2339 return Base::updateImpl(A); 2340 2341 // If the argument is read-only, no-alias cannot break synchronization. 2342 const auto &MemBehaviorAA = 2343 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 2344 if (MemBehaviorAA.isAssumedReadOnly()) 2345 return Base::updateImpl(A); 2346 2347 // If the argument is never passed through callbacks, no-alias cannot break 2348 // synchronization. 2349 bool AllCallSitesKnown; 2350 if (A.checkForAllCallSites( 2351 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 2352 true, AllCallSitesKnown)) 2353 return Base::updateImpl(A); 2354 2355 // TODO: add no-alias but make sure it doesn't break synchronization by 2356 // introducing fake uses. See: 2357 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, 2358 // International Workshop on OpenMP 2018, 2359 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 2360 2361 return indicatePessimisticFixpoint(); 2362 } 2363 2364 /// See AbstractAttribute::trackStatistics() 2365 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 2366 }; 2367 2368 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 2369 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 2370 : AANoAliasImpl(IRP, A) {} 2371 2372 /// See AbstractAttribute::initialize(...). 2373 void initialize(Attributor &A) override { 2374 // See callsite argument attribute and callee argument attribute. 2375 const auto &CB = cast<CallBase>(getAnchorValue()); 2376 if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias)) 2377 indicateOptimisticFixpoint(); 2378 Value &Val = getAssociatedValue(); 2379 if (isa<ConstantPointerNull>(Val) && 2380 !NullPointerIsDefined(getAnchorScope(), 2381 Val.getType()->getPointerAddressSpace())) 2382 indicateOptimisticFixpoint(); 2383 } 2384 2385 /// Determine if the underlying value may alias with the call site argument 2386 /// \p OtherArgNo of \p ICS (= the underlying call site). 2387 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 2388 const AAMemoryBehavior &MemBehaviorAA, 2389 const CallBase &CB, unsigned OtherArgNo) { 2390 // We do not need to worry about aliasing with the underlying IRP. 2391 if (this->getArgNo() == (int)OtherArgNo) 2392 return false; 2393 2394 // If it is not a pointer or pointer vector we do not alias. 2395 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 2396 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 2397 return false; 2398 2399 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 2400 *this, IRPosition::callsite_argument(CB, OtherArgNo), 2401 /* TrackDependence */ false); 2402 2403 // If the argument is readnone, there is no read-write aliasing. 2404 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 2405 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2406 return false; 2407 } 2408 2409 // If the argument is readonly and the underlying value is readonly, there 2410 // is no read-write aliasing. 2411 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 2412 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 2413 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2414 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2415 return false; 2416 } 2417 2418 // We have to utilize actual alias analysis queries so we need the object. 
2419 if (!AAR) 2420 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 2421 2422 // Try to rule it out at the call site. 2423 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 2424 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 2425 "callsite arguments: " 2426 << getAssociatedValue() << " " << *ArgOp << " => " 2427 << (IsAliasing ? "" : "no-") << "alias \n"); 2428 2429 return IsAliasing; 2430 } 2431 2432 bool 2433 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 2434 const AAMemoryBehavior &MemBehaviorAA, 2435 const AANoAlias &NoAliasAA) { 2436 // We can deduce "noalias" if the following conditions hold. 2437 // (i) Associated value is assumed to be noalias in the definition. 2438 // (ii) Associated value is assumed to be no-capture in all the uses 2439 // possibly executed before this callsite. 2440 // (iii) There is no other pointer argument which could alias with the 2441 // value. 2442 2443 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 2444 if (!AssociatedValueIsNoAliasAtDef) { 2445 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 2446 << " is not no-alias at the definition\n"); 2447 return false; 2448 } 2449 2450 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 2451 2452 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2453 auto &NoCaptureAA = 2454 A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false); 2455 // Check whether the value is captured in the scope using AANoCapture. 2456 // Look at CFG and check only uses possibly executed before this 2457 // callsite. 2458 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 2459 Instruction *UserI = cast<Instruction>(U.getUser()); 2460 2461 // If user if curr instr and only use. 2462 if (UserI == getCtxI() && UserI->hasOneUse()) 2463 return true; 2464 2465 const Function *ScopeFn = VIRP.getAnchorScope(); 2466 if (ScopeFn) { 2467 const auto &ReachabilityAA = 2468 A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); 2469 2470 if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI())) 2471 return true; 2472 2473 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2474 if (CB->isArgOperand(&U)) { 2475 2476 unsigned ArgNo = CB->getArgOperandNo(&U); 2477 2478 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 2479 *this, IRPosition::callsite_argument(*CB, ArgNo)); 2480 2481 if (NoCaptureAA.isAssumedNoCapture()) 2482 return true; 2483 } 2484 } 2485 } 2486 2487 // For cases which can potentially have more users 2488 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 2489 isa<SelectInst>(U)) { 2490 Follow = true; 2491 return true; 2492 } 2493 2494 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 2495 return false; 2496 }; 2497 2498 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 2499 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 2500 LLVM_DEBUG( 2501 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 2502 << " cannot be noalias as it is potentially captured\n"); 2503 return false; 2504 } 2505 } 2506 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 2507 2508 // Check there is no other pointer argument which could alias with the 2509 // value passed at this call site. 
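// E.g., for `call void @f(i8* %p, i8* %p)` neither argument can be noalias
// unless the memory-behavior queries above already rule out a read-write
// conflict between them.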
2510 // TODO: AbstractCallSite 2511 const auto &CB = cast<CallBase>(getAnchorValue()); 2512 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); 2513 OtherArgNo++) 2514 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 2515 return false; 2516 2517 return true; 2518 } 2519 2520 /// See AbstractAttribute::updateImpl(...). 2521 ChangeStatus updateImpl(Attributor &A) override { 2522 // If the argument is readnone we are done as there are no accesses via the 2523 // argument. 2524 auto &MemBehaviorAA = 2525 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 2526 /* TrackDependence */ false); 2527 if (MemBehaviorAA.isAssumedReadNone()) { 2528 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2529 return ChangeStatus::UNCHANGED; 2530 } 2531 2532 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2533 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, 2534 /* TrackDependence */ false); 2535 2536 AAResults *AAR = nullptr; 2537 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 2538 NoAliasAA)) { 2539 LLVM_DEBUG( 2540 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 2541 return ChangeStatus::UNCHANGED; 2542 } 2543 2544 return indicatePessimisticFixpoint(); 2545 } 2546 2547 /// See AbstractAttribute::trackStatistics() 2548 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 2549 }; 2550 2551 /// NoAlias attribute for function return value. 2552 struct AANoAliasReturned final : AANoAliasImpl { 2553 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 2554 : AANoAliasImpl(IRP, A) {} 2555 2556 /// See AbstractAttribute::updateImpl(...). 2557 virtual ChangeStatus updateImpl(Attributor &A) override { 2558 2559 auto CheckReturnValue = [&](Value &RV) -> bool { 2560 if (Constant *C = dyn_cast<Constant>(&RV)) 2561 if (C->isNullValue() || isa<UndefValue>(C)) 2562 return true; 2563 2564 /// For now, we can only deduce noalias if we have call sites. 2565 /// FIXME: add more support. 2566 if (!isa<CallBase>(&RV)) 2567 return false; 2568 2569 const IRPosition &RVPos = IRPosition::value(RV); 2570 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); 2571 if (!NoAliasAA.isAssumedNoAlias()) 2572 return false; 2573 2574 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); 2575 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 2576 }; 2577 2578 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 2579 return indicatePessimisticFixpoint(); 2580 2581 return ChangeStatus::UNCHANGED; 2582 } 2583 2584 /// See AbstractAttribute::trackStatistics() 2585 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 2586 }; 2587 2588 /// NoAlias attribute deduction for a call site return value. 2589 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 2590 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 2591 : AANoAliasImpl(IRP, A) {} 2592 2593 /// See AbstractAttribute::initialize(...). 2594 void initialize(Attributor &A) override { 2595 AANoAliasImpl::initialize(A); 2596 Function *F = getAssociatedFunction(); 2597 if (!F) 2598 indicatePessimisticFixpoint(); 2599 } 2600 2601 /// See AbstractAttribute::updateImpl(...). 
2602 ChangeStatus updateImpl(Attributor &A) override { 2603 // TODO: Once we have call site specific value information we can provide 2604 // call site specific liveness information and then it makes 2605 // sense to specialize attributes for call sites arguments instead of 2606 // redirecting requests to the callee argument. 2607 Function *F = getAssociatedFunction(); 2608 const IRPosition &FnPos = IRPosition::returned(*F); 2609 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); 2610 return clampStateAndIndicateChange( 2611 getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState())); 2612 } 2613 2614 /// See AbstractAttribute::trackStatistics() 2615 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 2616 }; 2617 2618 /// -------------------AAIsDead Function Attribute----------------------- 2619 2620 struct AAIsDeadValueImpl : public AAIsDead { 2621 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2622 2623 /// See AAIsDead::isAssumedDead(). 2624 bool isAssumedDead() const override { return getAssumed(); } 2625 2626 /// See AAIsDead::isKnownDead(). 2627 bool isKnownDead() const override { return getKnown(); } 2628 2629 /// See AAIsDead::isAssumedDead(BasicBlock *). 2630 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 2631 2632 /// See AAIsDead::isKnownDead(BasicBlock *). 2633 bool isKnownDead(const BasicBlock *BB) const override { return false; } 2634 2635 /// See AAIsDead::isAssumedDead(Instruction *I). 2636 bool isAssumedDead(const Instruction *I) const override { 2637 return I == getCtxI() && isAssumedDead(); 2638 } 2639 2640 /// See AAIsDead::isKnownDead(Instruction *I). 2641 bool isKnownDead(const Instruction *I) const override { 2642 return isAssumedDead(I) && getKnown(); 2643 } 2644 2645 /// See AbstractAttribute::getAsStr(). 2646 const std::string getAsStr() const override { 2647 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 2648 } 2649 2650 /// Check if all uses are assumed dead. 2651 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 2652 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 2653 // Explicitly set the dependence class to required because we want a long 2654 // chain of N dependent instructions to be considered live as soon as one is 2655 // without going through N update cycles. This is not required for 2656 // correctness. 2657 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); 2658 } 2659 2660 /// Determine if \p I is assumed to be side-effect free. 
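/// This is the case if \p I would be trivially dead anyway, or if it is a
/// non-intrinsic call that is assumed to not unwind and to at most read
/// memory.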
2661 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 2662 if (!I || wouldInstructionBeTriviallyDead(I)) 2663 return true; 2664 2665 auto *CB = dyn_cast<CallBase>(I); 2666 if (!CB || isa<IntrinsicInst>(CB)) 2667 return false; 2668 2669 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 2670 const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( 2671 *this, CallIRP, /* TrackDependence */ false); 2672 if (!NoUnwindAA.isAssumedNoUnwind()) 2673 return false; 2674 if (!NoUnwindAA.isKnownNoUnwind()) 2675 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 2676 2677 const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( 2678 *this, CallIRP, /* TrackDependence */ false); 2679 if (MemBehaviorAA.isAssumedReadOnly()) { 2680 if (!MemBehaviorAA.isKnownReadOnly()) 2681 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2682 return true; 2683 } 2684 return false; 2685 } 2686 }; 2687 2688 struct AAIsDeadFloating : public AAIsDeadValueImpl { 2689 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 2690 : AAIsDeadValueImpl(IRP, A) {} 2691 2692 /// See AbstractAttribute::initialize(...). 2693 void initialize(Attributor &A) override { 2694 if (isa<UndefValue>(getAssociatedValue())) { 2695 indicatePessimisticFixpoint(); 2696 return; 2697 } 2698 2699 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2700 if (!isAssumedSideEffectFree(A, I)) 2701 indicatePessimisticFixpoint(); 2702 } 2703 2704 /// See AbstractAttribute::updateImpl(...). 2705 ChangeStatus updateImpl(Attributor &A) override { 2706 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2707 if (!isAssumedSideEffectFree(A, I)) 2708 return indicatePessimisticFixpoint(); 2709 2710 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2711 return indicatePessimisticFixpoint(); 2712 return ChangeStatus::UNCHANGED; 2713 } 2714 2715 /// See AbstractAttribute::manifest(...). 2716 ChangeStatus manifest(Attributor &A) override { 2717 Value &V = getAssociatedValue(); 2718 if (auto *I = dyn_cast<Instruction>(&V)) { 2719 // If we get here we basically know the users are all dead. We check if 2720 // isAssumedSideEffectFree returns true here again because it might not be 2721 // the case and only the users are dead but the instruction (=call) is 2722 // still needed. 2723 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { 2724 A.deleteAfterManifest(*I); 2725 return ChangeStatus::CHANGED; 2726 } 2727 } 2728 if (V.use_empty()) 2729 return ChangeStatus::UNCHANGED; 2730 2731 bool UsedAssumedInformation = false; 2732 Optional<Constant *> C = 2733 A.getAssumedConstant(V, *this, UsedAssumedInformation); 2734 if (C.hasValue() && C.getValue()) 2735 return ChangeStatus::UNCHANGED; 2736 2737 // Replace the value with undef as it is dead but keep droppable uses around 2738 // as they provide information we don't want to give up on just yet. 2739 UndefValue &UV = *UndefValue::get(V.getType()); 2740 bool AnyChange = 2741 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); 2742 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2743 } 2744 2745 /// See AbstractAttribute::trackStatistics() 2746 void trackStatistics() const override { 2747 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 2748 } 2749 }; 2750 2751 struct AAIsDeadArgument : public AAIsDeadFloating { 2752 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 2753 : AAIsDeadFloating(IRP, A) {} 2754 2755 /// See AbstractAttribute::initialize(...). 
2756 void initialize(Attributor &A) override { 2757 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 2758 indicatePessimisticFixpoint(); 2759 } 2760 2761 /// See AbstractAttribute::manifest(...). 2762 ChangeStatus manifest(Attributor &A) override { 2763 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 2764 Argument &Arg = *getAssociatedArgument(); 2765 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 2766 if (A.registerFunctionSignatureRewrite( 2767 Arg, /* ReplacementTypes */ {}, 2768 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 2769 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 2770 Arg.dropDroppableUses(); 2771 return ChangeStatus::CHANGED; 2772 } 2773 return Changed; 2774 } 2775 2776 /// See AbstractAttribute::trackStatistics() 2777 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 2778 }; 2779 2780 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 2781 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 2782 : AAIsDeadValueImpl(IRP, A) {} 2783 2784 /// See AbstractAttribute::initialize(...). 2785 void initialize(Attributor &A) override { 2786 if (isa<UndefValue>(getAssociatedValue())) 2787 indicatePessimisticFixpoint(); 2788 } 2789 2790 /// See AbstractAttribute::updateImpl(...). 2791 ChangeStatus updateImpl(Attributor &A) override { 2792 // TODO: Once we have call site specific value information we can provide 2793 // call site specific liveness information and then it makes 2794 // sense to specialize attributes for call sites arguments instead of 2795 // redirecting requests to the callee argument. 2796 Argument *Arg = getAssociatedArgument(); 2797 if (!Arg) 2798 return indicatePessimisticFixpoint(); 2799 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2800 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); 2801 return clampStateAndIndicateChange( 2802 getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState())); 2803 } 2804 2805 /// See AbstractAttribute::manifest(...). 2806 ChangeStatus manifest(Attributor &A) override { 2807 CallBase &CB = cast<CallBase>(getAnchorValue()); 2808 Use &U = CB.getArgOperandUse(getArgNo()); 2809 assert(!isa<UndefValue>(U.get()) && 2810 "Expected undef values to be filtered out!"); 2811 UndefValue &UV = *UndefValue::get(U->getType()); 2812 if (A.changeUseAfterManifest(U, UV)) 2813 return ChangeStatus::CHANGED; 2814 return ChangeStatus::UNCHANGED; 2815 } 2816 2817 /// See AbstractAttribute::trackStatistics() 2818 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 2819 }; 2820 2821 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 2822 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 2823 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 2824 2825 /// See AAIsDead::isAssumedDead(). 2826 bool isAssumedDead() const override { 2827 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 2828 } 2829 2830 /// See AbstractAttribute::initialize(...). 2831 void initialize(Attributor &A) override { 2832 if (isa<UndefValue>(getAssociatedValue())) { 2833 indicatePessimisticFixpoint(); 2834 return; 2835 } 2836 2837 // We track this separately as a secondary state. 2838 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 2839 } 2840 2841 /// See AbstractAttribute::updateImpl(...). 
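/// The returned value stays assumed dead as long as all its uses are assumed
/// dead; whether the call itself is side-effect free is tracked separately
/// and factors into isAssumedDead().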
2842 ChangeStatus updateImpl(Attributor &A) override { 2843 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2844 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 2845 IsAssumedSideEffectFree = false; 2846 Changed = ChangeStatus::CHANGED; 2847 } 2848 2849 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2850 return indicatePessimisticFixpoint(); 2851 return Changed; 2852 } 2853 2854 /// See AbstractAttribute::trackStatistics() 2855 void trackStatistics() const override { 2856 if (IsAssumedSideEffectFree) 2857 STATS_DECLTRACK_CSRET_ATTR(IsDead) 2858 else 2859 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 2860 } 2861 2862 /// See AbstractAttribute::getAsStr(). 2863 const std::string getAsStr() const override { 2864 return isAssumedDead() 2865 ? "assumed-dead" 2866 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 2867 } 2868 2869 private: 2870 bool IsAssumedSideEffectFree; 2871 }; 2872 2873 struct AAIsDeadReturned : public AAIsDeadValueImpl { 2874 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 2875 : AAIsDeadValueImpl(IRP, A) {} 2876 2877 /// See AbstractAttribute::updateImpl(...). 2878 ChangeStatus updateImpl(Attributor &A) override { 2879 2880 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 2881 {Instruction::Ret}); 2882 2883 auto PredForCallSite = [&](AbstractCallSite ACS) { 2884 if (ACS.isCallbackCall() || !ACS.getInstruction()) 2885 return false; 2886 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 2887 }; 2888 2889 bool AllCallSitesKnown; 2890 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 2891 AllCallSitesKnown)) 2892 return indicatePessimisticFixpoint(); 2893 2894 return ChangeStatus::UNCHANGED; 2895 } 2896 2897 /// See AbstractAttribute::manifest(...). 2898 ChangeStatus manifest(Attributor &A) override { 2899 // TODO: Rewrite the signature to return void? 2900 bool AnyChange = false; 2901 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 2902 auto RetInstPred = [&](Instruction &I) { 2903 ReturnInst &RI = cast<ReturnInst>(I); 2904 if (!isa<UndefValue>(RI.getReturnValue())) 2905 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 2906 return true; 2907 }; 2908 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); 2909 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2910 } 2911 2912 /// See AbstractAttribute::trackStatistics() 2913 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 2914 }; 2915 2916 struct AAIsDeadFunction : public AAIsDead { 2917 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2918 2919 /// See AbstractAttribute::initialize(...). 2920 void initialize(Attributor &A) override { 2921 const Function *F = getAnchorScope(); 2922 if (F && !F->isDeclaration()) { 2923 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 2924 assumeLive(A, F->getEntryBlock()); 2925 } 2926 } 2927 2928 /// See AbstractAttribute::getAsStr(). 2929 const std::string getAsStr() const override { 2930 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 2931 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 2932 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 2933 std::to_string(KnownDeadEnds.size()) + "]"; 2934 } 2935 2936 /// See AbstractAttribute::manifest(...). 
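/// If no block is assumed live, the function is deleted. Otherwise, known
/// dead ends (e.g., calls that will not return) are followed by an
/// unreachable, invokes with a dead successor are registered to be rewritten,
/// and every block that is not assumed live is deleted.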
2937 ChangeStatus manifest(Attributor &A) override { 2938 assert(getState().isValidState() && 2939 "Attempted to manifest an invalid state!"); 2940 2941 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 2942 Function &F = *getAnchorScope(); 2943 2944 if (AssumedLiveBlocks.empty()) { 2945 A.deleteAfterManifest(F); 2946 return ChangeStatus::CHANGED; 2947 } 2948 2949 // Flag to determine if we can change an invoke to a call assuming the 2950 // callee is nounwind. This is not possible if the personality of the 2951 // function allows to catch asynchronous exceptions. 2952 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 2953 2954 KnownDeadEnds.set_union(ToBeExploredFrom); 2955 for (const Instruction *DeadEndI : KnownDeadEnds) { 2956 auto *CB = dyn_cast<CallBase>(DeadEndI); 2957 if (!CB) 2958 continue; 2959 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 2960 *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, 2961 DepClassTy::OPTIONAL); 2962 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 2963 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 2964 continue; 2965 2966 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 2967 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 2968 else 2969 A.changeToUnreachableAfterManifest( 2970 const_cast<Instruction *>(DeadEndI->getNextNode())); 2971 HasChanged = ChangeStatus::CHANGED; 2972 } 2973 2974 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 2975 for (BasicBlock &BB : F) 2976 if (!AssumedLiveBlocks.count(&BB)) { 2977 A.deleteAfterManifest(BB); 2978 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 2979 } 2980 2981 return HasChanged; 2982 } 2983 2984 /// See AbstractAttribute::updateImpl(...). 2985 ChangeStatus updateImpl(Attributor &A) override; 2986 2987 /// See AbstractAttribute::trackStatistics() 2988 void trackStatistics() const override {} 2989 2990 /// Returns true if the function is assumed dead. 2991 bool isAssumedDead() const override { return false; } 2992 2993 /// See AAIsDead::isKnownDead(). 2994 bool isKnownDead() const override { return false; } 2995 2996 /// See AAIsDead::isAssumedDead(BasicBlock *). 2997 bool isAssumedDead(const BasicBlock *BB) const override { 2998 assert(BB->getParent() == getAnchorScope() && 2999 "BB must be in the same anchor scope function."); 3000 3001 if (!getAssumed()) 3002 return false; 3003 return !AssumedLiveBlocks.count(BB); 3004 } 3005 3006 /// See AAIsDead::isKnownDead(BasicBlock *). 3007 bool isKnownDead(const BasicBlock *BB) const override { 3008 return getKnown() && isAssumedDead(BB); 3009 } 3010 3011 /// See AAIsDead::isAssumed(Instruction *I). 3012 bool isAssumedDead(const Instruction *I) const override { 3013 assert(I->getParent()->getParent() == getAnchorScope() && 3014 "Instruction must be in the same anchor scope function."); 3015 3016 if (!getAssumed()) 3017 return false; 3018 3019 // If it is not in AssumedLiveBlocks then it for sure dead. 3020 // Otherwise, it can still be after noreturn call in a live block. 3021 if (!AssumedLiveBlocks.count(I->getParent())) 3022 return true; 3023 3024 // If it is not after a liveness barrier it is live. 3025 const Instruction *PrevI = I->getPrevNode(); 3026 while (PrevI) { 3027 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3028 return true; 3029 PrevI = PrevI->getPrevNode(); 3030 } 3031 return false; 3032 } 3033 3034 /// See AAIsDead::isKnownDead(Instruction *I). 
3035 bool isKnownDead(const Instruction *I) const override { 3036 return getKnown() && isAssumedDead(I); 3037 } 3038 3039 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3040 /// that internal function called from \p BB should now be looked at. 3041 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3042 if (!AssumedLiveBlocks.insert(&BB).second) 3043 return false; 3044 3045 // We assume that all of BB is (probably) live now and if there are calls to 3046 // internal functions we will assume that those are now live as well. This 3047 // is a performance optimization for blocks with calls to a lot of internal 3048 // functions. It can however cause dead functions to be treated as live. 3049 for (const Instruction &I : BB) 3050 if (const auto *CB = dyn_cast<CallBase>(&I)) 3051 if (const Function *F = CB->getCalledFunction()) 3052 if (F->hasLocalLinkage()) 3053 A.markLiveInternalFunction(*F); 3054 return true; 3055 } 3056 3057 /// Collection of instructions that need to be explored again, e.g., we 3058 /// did assume they do not transfer control to (one of their) successors. 3059 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3060 3061 /// Collection of instructions that are known to not transfer control. 3062 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3063 3064 /// Collection of all assumed live BasicBlocks. 3065 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3066 }; 3067 3068 static bool 3069 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3070 AbstractAttribute &AA, 3071 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3072 const IRPosition &IPos = IRPosition::callsite_function(CB); 3073 3074 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3075 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3076 if (NoReturnAA.isAssumedNoReturn()) 3077 return !NoReturnAA.isKnownNoReturn(); 3078 if (CB.isTerminator()) 3079 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3080 else 3081 AliveSuccessors.push_back(CB.getNextNode()); 3082 return false; 3083 } 3084 3085 static bool 3086 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3087 AbstractAttribute &AA, 3088 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3089 bool UsedAssumedInformation = 3090 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3091 3092 // First, determine if we can change an invoke to a call assuming the 3093 // callee is nounwind. This is not possible if the personality of the 3094 // function allows to catch asynchronous exceptions. 
3095 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3096 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3097 } else { 3098 const IRPosition &IPos = IRPosition::callsite_function(II); 3099 const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( 3100 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3101 if (AANoUnw.isAssumedNoUnwind()) { 3102 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3103 } else { 3104 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3105 } 3106 } 3107 return UsedAssumedInformation; 3108 } 3109 3110 static bool 3111 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3112 AbstractAttribute &AA, 3113 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3114 bool UsedAssumedInformation = false; 3115 if (BI.getNumSuccessors() == 1) { 3116 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3117 } else { 3118 Optional<ConstantInt *> CI = getAssumedConstantInt( 3119 A, *BI.getCondition(), AA, UsedAssumedInformation); 3120 if (!CI.hasValue()) { 3121 // No value yet, assume both edges are dead. 3122 } else if (CI.getValue()) { 3123 const BasicBlock *SuccBB = 3124 BI.getSuccessor(1 - CI.getValue()->getZExtValue()); 3125 AliveSuccessors.push_back(&SuccBB->front()); 3126 } else { 3127 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3128 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3129 UsedAssumedInformation = false; 3130 } 3131 } 3132 return UsedAssumedInformation; 3133 } 3134 3135 static bool 3136 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3137 AbstractAttribute &AA, 3138 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3139 bool UsedAssumedInformation = false; 3140 Optional<ConstantInt *> CI = 3141 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); 3142 if (!CI.hasValue()) { 3143 // No value yet, assume all edges are dead. 3144 } else if (CI.getValue()) { 3145 for (auto &CaseIt : SI.cases()) { 3146 if (CaseIt.getCaseValue() == CI.getValue()) { 3147 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3148 return UsedAssumedInformation; 3149 } 3150 } 3151 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3152 return UsedAssumedInformation; 3153 } else { 3154 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3155 AliveSuccessors.push_back(&SuccBB->front()); 3156 } 3157 return UsedAssumedInformation; 3158 } 3159 3160 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3161 ChangeStatus Change = ChangeStatus::UNCHANGED; 3162 3163 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3164 << getAnchorScope()->size() << "] BBs and " 3165 << ToBeExploredFrom.size() << " exploration points and " 3166 << KnownDeadEnds.size() << " known dead ends\n"); 3167 3168 // Copy and clear the list of instructions we need to explore from. It is 3169 // refilled with instructions the next update has to look at. 3170 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3171 ToBeExploredFrom.end()); 3172 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3173 3174 SmallVector<const Instruction *, 8> AliveSuccessors; 3175 while (!Worklist.empty()) { 3176 const Instruction *I = Worklist.pop_back_val(); 3177 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3178 3179 AliveSuccessors.clear(); 3180 3181 bool UsedAssumedInformation = false; 3182 switch (I->getOpcode()) { 3183 // TODO: look for (assumed) UB to backwards propagate "deadness". 
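    // Illustrative note (added; the IR below is an assumed example): for
    //   br i1 %c, label %t, label %f
    // with %c assumed to be constant true, only %t's entry is recorded as an
    // alive successor; if %c is not known yet, no successor is recorded and
    // the branch is revisited via NewToBeExploredFrom in the next update.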
    default:
      if (I->isTerminator()) {
        for (const BasicBlock *SuccBB : successors(I->getParent()))
          AliveSuccessors.push_back(&SuccBB->front());
      } else {
        AliveSuccessors.push_back(I->getNextNode());
      }
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finish the exploration, (2) not rule
  // any unreachable code dead, and (3) not discover any non-trivial dead end.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
3268 ChangeStatus updateImpl(Attributor &A) override { 3269 return indicatePessimisticFixpoint(); 3270 } 3271 3272 /// See AbstractAttribute::trackStatistics() 3273 void trackStatistics() const override {} 3274 }; 3275 3276 /// -------------------- Dereferenceable Argument Attribute -------------------- 3277 3278 template <> 3279 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 3280 const DerefState &R) { 3281 ChangeStatus CS0 = 3282 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 3283 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 3284 return CS0 | CS1; 3285 } 3286 3287 struct AADereferenceableImpl : AADereferenceable { 3288 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 3289 : AADereferenceable(IRP, A) {} 3290 using StateType = DerefState; 3291 3292 /// See AbstractAttribute::initialize(...). 3293 void initialize(Attributor &A) override { 3294 SmallVector<Attribute, 4> Attrs; 3295 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 3296 Attrs, /* IgnoreSubsumingPositions */ false, &A); 3297 for (const Attribute &Attr : Attrs) 3298 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 3299 3300 const IRPosition &IRP = this->getIRPosition(); 3301 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, 3302 /* TrackDependence */ false); 3303 3304 bool CanBeNull; 3305 takeKnownDerefBytesMaximum( 3306 IRP.getAssociatedValue().getPointerDereferenceableBytes( 3307 A.getDataLayout(), CanBeNull)); 3308 3309 bool IsFnInterface = IRP.isFnInterfaceKind(); 3310 Function *FnScope = IRP.getAnchorScope(); 3311 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 3312 indicatePessimisticFixpoint(); 3313 return; 3314 } 3315 3316 if (Instruction *CtxI = getCtxI()) 3317 followUsesInMBEC(*this, A, getState(), *CtxI); 3318 } 3319 3320 /// See AbstractAttribute::getState() 3321 /// { 3322 StateType &getState() override { return *this; } 3323 const StateType &getState() const override { return *this; } 3324 /// } 3325 3326 /// Helper function for collecting accessed bytes in must-be-executed-context 3327 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 3328 DerefState &State) { 3329 const Value *UseV = U->get(); 3330 if (!UseV->getType()->isPointerTy()) 3331 return; 3332 3333 Type *PtrTy = UseV->getType(); 3334 const DataLayout &DL = A.getDataLayout(); 3335 int64_t Offset; 3336 if (const Value *Base = getBasePointerOfAccessPointerOperand( 3337 I, Offset, DL, /*AllowNonInbounds*/ true)) { 3338 if (Base == &getAssociatedValue() && 3339 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 3340 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 3341 State.addAccessedBytes(Offset, Size); 3342 } 3343 } 3344 return; 3345 } 3346 3347 /// See followUsesInMBEC 3348 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3349 AADereferenceable::StateType &State) { 3350 bool IsNonNull = false; 3351 bool TrackUse = false; 3352 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 3353 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 3354 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 3355 << " for instruction " << *I << "\n"); 3356 3357 addAccessedBytesForUse(A, U, I, State); 3358 State.takeKnownDerefBytesMaximum(DerefBytes); 3359 return TrackUse; 3360 } 3361 3362 /// See AbstractAttribute::manifest(...). 
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
                            bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
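          // Illustrative example (added; the IR names are assumed): for
          //   %p = phi i8* [ %base, %entry ], [ %q, %latch ]
          //   %q = getelementptr inbounds i8, i8* %p, i64 4
          // each update would otherwise lower the assumed dereferenceable
          // bytes of %p by 4 until the known value is reached; giving up here
          // reaches that fixpoint immediately.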
3450 T.indicatePessimisticFixpoint(); 3451 } 3452 } 3453 3454 return T.isValidState(); 3455 }; 3456 3457 DerefState T; 3458 if (!genericValueTraversal<AADereferenceable, DerefState>( 3459 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 3460 return indicatePessimisticFixpoint(); 3461 3462 return clampStateAndIndicateChange(getState(), T); 3463 } 3464 3465 /// See AbstractAttribute::trackStatistics() 3466 void trackStatistics() const override { 3467 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 3468 } 3469 }; 3470 3471 /// Dereferenceable attribute for a return value. 3472 struct AADereferenceableReturned final 3473 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 3474 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 3475 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 3476 IRP, A) {} 3477 3478 /// See AbstractAttribute::trackStatistics() 3479 void trackStatistics() const override { 3480 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 3481 } 3482 }; 3483 3484 /// Dereferenceable attribute for an argument 3485 struct AADereferenceableArgument final 3486 : AAArgumentFromCallSiteArguments<AADereferenceable, 3487 AADereferenceableImpl> { 3488 using Base = 3489 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 3490 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 3491 : Base(IRP, A) {} 3492 3493 /// See AbstractAttribute::trackStatistics() 3494 void trackStatistics() const override { 3495 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 3496 } 3497 }; 3498 3499 /// Dereferenceable attribute for a call site argument. 3500 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 3501 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 3502 : AADereferenceableFloating(IRP, A) {} 3503 3504 /// See AbstractAttribute::trackStatistics() 3505 void trackStatistics() const override { 3506 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 3507 } 3508 }; 3509 3510 /// Dereferenceable attribute deduction for a call site return value. 3511 struct AADereferenceableCallSiteReturned final 3512 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 3513 using Base = 3514 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 3515 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 3516 : Base(IRP, A) {} 3517 3518 /// See AbstractAttribute::trackStatistics() 3519 void trackStatistics() const override { 3520 STATS_DECLTRACK_CS_ATTR(dereferenceable); 3521 } 3522 }; 3523 3524 // ------------------------ Align Argument Attribute ------------------------ 3525 3526 static unsigned getKnownAlignForUse(Attributor &A, 3527 AbstractAttribute &QueryingAA, 3528 Value &AssociatedValue, const Use *U, 3529 const Instruction *I, bool &TrackUse) { 3530 // We need to follow common pointer manipulation uses to the accesses they 3531 // feed into. 3532 if (isa<CastInst>(I)) { 3533 // Follow all but ptr2int casts. 
3534 TrackUse = !isa<PtrToIntInst>(I); 3535 return 0; 3536 } 3537 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3538 if (GEP->hasAllConstantIndices()) { 3539 TrackUse = true; 3540 return 0; 3541 } 3542 } 3543 3544 MaybeAlign MA; 3545 if (const auto *CB = dyn_cast<CallBase>(I)) { 3546 if (CB->isBundleOperand(U) || CB->isCallee(U)) 3547 return 0; 3548 3549 unsigned ArgNo = CB->getArgOperandNo(U); 3550 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 3551 // As long as we only use known information there is no need to track 3552 // dependences here. 3553 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, 3554 /* TrackDependence */ false); 3555 MA = MaybeAlign(AlignAA.getKnownAlign()); 3556 } 3557 3558 const DataLayout &DL = A.getDataLayout(); 3559 const Value *UseV = U->get(); 3560 if (auto *SI = dyn_cast<StoreInst>(I)) { 3561 if (SI->getPointerOperand() == UseV) 3562 MA = SI->getAlign(); 3563 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 3564 if (LI->getPointerOperand() == UseV) 3565 MA = LI->getAlign(); 3566 } 3567 3568 if (!MA || *MA <= 1) 3569 return 0; 3570 3571 unsigned Alignment = MA->value(); 3572 int64_t Offset; 3573 3574 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 3575 if (Base == &AssociatedValue) { 3576 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 3577 // So we can say that the maximum power of two which is a divisor of 3578 // gcd(Offset, Alignment) is an alignment. 3579 3580 uint32_t gcd = 3581 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 3582 Alignment = llvm::PowerOf2Floor(gcd); 3583 } 3584 } 3585 3586 return Alignment; 3587 } 3588 3589 struct AAAlignImpl : AAAlign { 3590 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 3591 3592 /// See AbstractAttribute::initialize(...). 3593 void initialize(Attributor &A) override { 3594 SmallVector<Attribute, 4> Attrs; 3595 getAttrs({Attribute::Alignment}, Attrs); 3596 for (const Attribute &Attr : Attrs) 3597 takeKnownMaximum(Attr.getValueAsInt()); 3598 3599 Value &V = getAssociatedValue(); 3600 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 3601 // use of the function pointer. This was caused by D73131. We want to 3602 // avoid this for function pointers especially because we iterate 3603 // their uses and int2ptr is not handled. It is not a correctness 3604 // problem though! 3605 if (!V.getType()->getPointerElementType()->isFunctionTy()) 3606 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 3607 3608 if (getIRPosition().isFnInterfaceKind() && 3609 (!getAnchorScope() || 3610 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 3611 indicatePessimisticFixpoint(); 3612 return; 3613 } 3614 3615 if (Instruction *CtxI = getCtxI()) 3616 followUsesInMBEC(*this, A, getState(), *CtxI); 3617 } 3618 3619 /// See AbstractAttribute::manifest(...). 3620 ChangeStatus manifest(Attributor &A) override { 3621 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 3622 3623 // Check for users that allow alignment annotations. 
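    // Added worked example for the GCD reasoning in getKnownAlignForUse above
    // (illustrative, not from the original comments): an access aligned to 16
    // at constant offset 20 from the associated value gives gcd(20, 16) = 4,
    // so PowerOf2Floor(4) = 4 is a provable alignment of the associated value
    // itself.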
3624 Value &AssociatedValue = getAssociatedValue(); 3625 for (const Use &U : AssociatedValue.uses()) { 3626 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 3627 if (SI->getPointerOperand() == &AssociatedValue) 3628 if (SI->getAlignment() < getAssumedAlign()) { 3629 STATS_DECLTRACK(AAAlign, Store, 3630 "Number of times alignment added to a store"); 3631 SI->setAlignment(Align(getAssumedAlign())); 3632 LoadStoreChanged = ChangeStatus::CHANGED; 3633 } 3634 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 3635 if (LI->getPointerOperand() == &AssociatedValue) 3636 if (LI->getAlignment() < getAssumedAlign()) { 3637 LI->setAlignment(Align(getAssumedAlign())); 3638 STATS_DECLTRACK(AAAlign, Load, 3639 "Number of times alignment added to a load"); 3640 LoadStoreChanged = ChangeStatus::CHANGED; 3641 } 3642 } 3643 } 3644 3645 ChangeStatus Changed = AAAlign::manifest(A); 3646 3647 Align InheritAlign = 3648 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3649 if (InheritAlign >= getAssumedAlign()) 3650 return LoadStoreChanged; 3651 return Changed | LoadStoreChanged; 3652 } 3653 3654 // TODO: Provide a helper to determine the implied ABI alignment and check in 3655 // the existing manifest method and a new one for AAAlignImpl that value 3656 // to avoid making the alignment explicit if it did not improve. 3657 3658 /// See AbstractAttribute::getDeducedAttributes 3659 virtual void 3660 getDeducedAttributes(LLVMContext &Ctx, 3661 SmallVectorImpl<Attribute> &Attrs) const override { 3662 if (getAssumedAlign() > 1) 3663 Attrs.emplace_back( 3664 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 3665 } 3666 3667 /// See followUsesInMBEC 3668 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3669 AAAlign::StateType &State) { 3670 bool TrackUse = false; 3671 3672 unsigned int KnownAlign = 3673 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 3674 State.takeKnownMaximum(KnownAlign); 3675 3676 return TrackUse; 3677 } 3678 3679 /// See AbstractAttribute::getAsStr(). 3680 const std::string getAsStr() const override { 3681 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 3682 "-" + std::to_string(getAssumedAlign()) + ">") 3683 : "unknown-align"; 3684 } 3685 }; 3686 3687 /// Align attribute for a floating value. 3688 struct AAAlignFloating : AAAlignImpl { 3689 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 3690 3691 /// See AbstractAttribute::updateImpl(...). 3692 ChangeStatus updateImpl(Attributor &A) override { 3693 const DataLayout &DL = A.getDataLayout(); 3694 3695 auto VisitValueCB = [&](Value &V, const Instruction *, 3696 AAAlign::StateType &T, bool Stripped) -> bool { 3697 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); 3698 if (!Stripped && this == &AA) { 3699 // Use only IR information if we did not strip anything. 3700 Align PA = V.getPointerAlignment(DL); 3701 T.takeKnownMaximum(PA.value()); 3702 T.indicatePessimisticFixpoint(); 3703 } else { 3704 // Use abstract attribute information. 3705 const AAAlign::StateType &DS = 3706 static_cast<const AAAlign::StateType &>(AA.getState()); 3707 T ^= DS; 3708 } 3709 return T.isValidState(); 3710 }; 3711 3712 StateType T; 3713 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, 3714 VisitValueCB, getCtxI())) 3715 return indicatePessimisticFixpoint(); 3716 3717 // TODO: If we know we visited all incoming values, thus no are assumed 3718 // dead, we can take the known information from the state T. 
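    // Added illustrative note (assumed example): a phi merging one pointer
    // with assumed align 16 and one with assumed align 8 can only keep the
    // weaker guarantee, so T ends up with assumed align 8 before the clamp
    // below.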
3719 return clampStateAndIndicateChange(getState(), T); 3720 } 3721 3722 /// See AbstractAttribute::trackStatistics() 3723 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 3724 }; 3725 3726 /// Align attribute for function return value. 3727 struct AAAlignReturned final 3728 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 3729 AAAlignReturned(const IRPosition &IRP, Attributor &A) 3730 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {} 3731 3732 /// See AbstractAttribute::trackStatistics() 3733 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 3734 }; 3735 3736 /// Align attribute for function argument. 3737 struct AAAlignArgument final 3738 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 3739 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 3740 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3741 3742 /// See AbstractAttribute::manifest(...). 3743 ChangeStatus manifest(Attributor &A) override { 3744 // If the associated argument is involved in a must-tail call we give up 3745 // because we would need to keep the argument alignments of caller and 3746 // callee in-sync. Just does not seem worth the trouble right now. 3747 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 3748 return ChangeStatus::UNCHANGED; 3749 return Base::manifest(A); 3750 } 3751 3752 /// See AbstractAttribute::trackStatistics() 3753 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 3754 }; 3755 3756 struct AAAlignCallSiteArgument final : AAAlignFloating { 3757 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 3758 : AAAlignFloating(IRP, A) {} 3759 3760 /// See AbstractAttribute::manifest(...). 3761 ChangeStatus manifest(Attributor &A) override { 3762 // If the associated argument is involved in a must-tail call we give up 3763 // because we would need to keep the argument alignments of caller and 3764 // callee in-sync. Just does not seem worth the trouble right now. 3765 if (Argument *Arg = getAssociatedArgument()) 3766 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 3767 return ChangeStatus::UNCHANGED; 3768 ChangeStatus Changed = AAAlignImpl::manifest(A); 3769 Align InheritAlign = 3770 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3771 if (InheritAlign >= getAssumedAlign()) 3772 Changed = ChangeStatus::UNCHANGED; 3773 return Changed; 3774 } 3775 3776 /// See AbstractAttribute::updateImpl(Attributor &A). 3777 ChangeStatus updateImpl(Attributor &A) override { 3778 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 3779 if (Argument *Arg = getAssociatedArgument()) { 3780 // We only take known information from the argument 3781 // so we do not need to track a dependence. 3782 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 3783 *this, IRPosition::argument(*Arg), /* TrackDependence */ false); 3784 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 3785 } 3786 return Changed; 3787 } 3788 3789 /// See AbstractAttribute::trackStatistics() 3790 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 3791 }; 3792 3793 /// Align attribute deduction for a call site return value. 3794 struct AAAlignCallSiteReturned final 3795 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 3796 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 3797 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 3798 : Base(IRP, A) {} 3799 3800 /// See AbstractAttribute::initialize(...). 
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};

/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturn::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret}))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state, and that state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw exceptions
    // and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
        DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
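    // Added illustrative example (assumed IR, not from the original comments):
    // in
    //   define void @f(i8* %p) { ... call void @f(i8* %p) ... }
    // the recursive call site argument is justified by the in-flight nocapture
    // assumption on %p itself, so the fixpoint can still conclude nocapture.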
4064 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); 4065 if (ArgNoCaptureAA.isAssumedNoCapture()) 4066 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4067 /* Return */ false); 4068 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4069 addPotentialCopy(*CB); 4070 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4071 /* Return */ false); 4072 } 4073 4074 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4075 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4076 /* Return */ true); 4077 } 4078 4079 /// Register \p CS as potential copy of the value we are checking. 4080 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } 4081 4082 /// See CaptureTracker::shouldExplore(...). 4083 bool shouldExplore(const Use *U) override { 4084 // Check liveness and ignore droppable users. 4085 return !U->getUser()->isDroppable() && 4086 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); 4087 } 4088 4089 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4090 /// \p CapturedInRet, then return the appropriate value for use in the 4091 /// CaptureTracker::captured() interface. 4092 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4093 bool CapturedInRet) { 4094 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4095 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4096 if (CapturedInMem) 4097 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4098 if (CapturedInInt) 4099 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4100 if (CapturedInRet) 4101 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4102 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4103 } 4104 4105 private: 4106 /// The attributor providing in-flight abstract attributes. 4107 Attributor &A; 4108 4109 /// The abstract attribute currently updated. 4110 AANoCapture &NoCaptureAA; 4111 4112 /// The abstract liveness state. 4113 const AAIsDead &IsDeadAA; 4114 4115 /// The state currently updated. 4116 AANoCapture::StateType &State; 4117 4118 /// Set of potential copies of the tracked value. 4119 SmallVectorImpl<const Value *> &PotentialCopies; 4120 4121 /// Global counter to limit the number of explored uses. 4122 unsigned &RemainingUsesToExplore; 4123 }; 4124 4125 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4126 const IRPosition &IRP = getIRPosition(); 4127 const Value *V = 4128 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue(); 4129 if (!V) 4130 return indicatePessimisticFixpoint(); 4131 4132 const Function *F = 4133 getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4134 assert(F && "Expected a function!"); 4135 const IRPosition &FnPos = IRPosition::function(*F); 4136 const auto &IsDeadAA = 4137 A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); 4138 4139 AANoCapture::StateType T; 4140 4141 // Readonly means we cannot capture through memory. 4142 const auto &FnMemAA = 4143 A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); 4144 if (FnMemAA.isAssumedReadOnly()) { 4145 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4146 if (FnMemAA.isKnownReadOnly()) 4147 addKnownBits(NOT_CAPTURED_IN_MEM); 4148 else 4149 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4150 } 4151 4152 // Make sure all returned values are different than the underlying value. 
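  // Added illustrative example (assumed IR): in
  //   define i8* @id(i8* %p) { ret i8* %p }
  // the argument escapes through the return, so CheckReturnedArgs below
  // rejects it and NOT_CAPTURED_IN_RET is not added for %p.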
4153 // TODO: we could do this in a more sophisticated way inside 4154 // AAReturnedValues, e.g., track all values that escape through returns 4155 // directly somehow. 4156 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4157 bool SeenConstant = false; 4158 for (auto &It : RVAA.returned_values()) { 4159 if (isa<Constant>(It.first)) { 4160 if (SeenConstant) 4161 return false; 4162 SeenConstant = true; 4163 } else if (!isa<Argument>(It.first) || 4164 It.first == getAssociatedArgument()) 4165 return false; 4166 } 4167 return true; 4168 }; 4169 4170 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 4171 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 4172 if (NoUnwindAA.isAssumedNoUnwind()) { 4173 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4174 const AAReturnedValues *RVAA = 4175 IsVoidTy ? nullptr 4176 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4177 /* TrackDependence */ true, 4178 DepClassTy::OPTIONAL); 4179 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4180 T.addKnownBits(NOT_CAPTURED_IN_RET); 4181 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4182 return ChangeStatus::UNCHANGED; 4183 if (NoUnwindAA.isKnownNoUnwind() && 4184 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4185 addKnownBits(NOT_CAPTURED_IN_RET); 4186 if (isKnown(NOT_CAPTURED_IN_MEM)) 4187 return indicateOptimisticFixpoint(); 4188 } 4189 } 4190 } 4191 4192 // Use the CaptureTracker interface and logic with the specialized tracker, 4193 // defined in AACaptureUseTracker, that can look at in-flight abstract 4194 // attributes and directly updates the assumed state. 4195 SmallVector<const Value *, 4> PotentialCopies; 4196 unsigned RemainingUsesToExplore = 4197 getDefaultMaxUsesToExploreForCaptureTracking(); 4198 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4199 RemainingUsesToExplore); 4200 4201 // Check all potential copies of the associated value until we can assume 4202 // none will be captured or we have to assume at least one might be. 4203 unsigned Idx = 0; 4204 PotentialCopies.push_back(V); 4205 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4206 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4207 4208 AANoCapture::StateType &S = getState(); 4209 auto Assumed = S.getAssumed(); 4210 S.intersectAssumedBits(T.getAssumed()); 4211 if (!isAssumedNoCaptureMaybeReturned()) 4212 return indicatePessimisticFixpoint(); 4213 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 4214 : ChangeStatus::CHANGED; 4215 } 4216 4217 /// NoCapture attribute for function arguments. 4218 struct AANoCaptureArgument final : AANoCaptureImpl { 4219 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 4220 : AANoCaptureImpl(IRP, A) {} 4221 4222 /// See AbstractAttribute::trackStatistics() 4223 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 4224 }; 4225 4226 /// NoCapture attribute for call site arguments. 4227 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 4228 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 4229 : AANoCaptureImpl(IRP, A) {} 4230 4231 /// See AbstractAttribute::initialize(...). 4232 void initialize(Attributor &A) override { 4233 if (Argument *Arg = getAssociatedArgument()) 4234 if (Arg->hasByValAttr()) 4235 indicateOptimisticFixpoint(); 4236 AANoCaptureImpl::initialize(A); 4237 } 4238 4239 /// See AbstractAttribute::updateImpl(...). 
4240 ChangeStatus updateImpl(Attributor &A) override { 4241 // TODO: Once we have call site specific value information we can provide 4242 // call site specific liveness information and then it makes 4243 // sense to specialize attributes for call sites arguments instead of 4244 // redirecting requests to the callee argument. 4245 Argument *Arg = getAssociatedArgument(); 4246 if (!Arg) 4247 return indicatePessimisticFixpoint(); 4248 const IRPosition &ArgPos = IRPosition::argument(*Arg); 4249 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); 4250 return clampStateAndIndicateChange( 4251 getState(), 4252 static_cast<const AANoCapture::StateType &>(ArgAA.getState())); 4253 } 4254 4255 /// See AbstractAttribute::trackStatistics() 4256 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 4257 }; 4258 4259 /// NoCapture attribute for floating values. 4260 struct AANoCaptureFloating final : AANoCaptureImpl { 4261 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 4262 : AANoCaptureImpl(IRP, A) {} 4263 4264 /// See AbstractAttribute::trackStatistics() 4265 void trackStatistics() const override { 4266 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 4267 } 4268 }; 4269 4270 /// NoCapture attribute for function return value. 4271 struct AANoCaptureReturned final : AANoCaptureImpl { 4272 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 4273 : AANoCaptureImpl(IRP, A) { 4274 llvm_unreachable("NoCapture is not applicable to function returns!"); 4275 } 4276 4277 /// See AbstractAttribute::initialize(...). 4278 void initialize(Attributor &A) override { 4279 llvm_unreachable("NoCapture is not applicable to function returns!"); 4280 } 4281 4282 /// See AbstractAttribute::updateImpl(...). 4283 ChangeStatus updateImpl(Attributor &A) override { 4284 llvm_unreachable("NoCapture is not applicable to function returns!"); 4285 } 4286 4287 /// See AbstractAttribute::trackStatistics() 4288 void trackStatistics() const override {} 4289 }; 4290 4291 /// NoCapture attribute deduction for a call site return value. 4292 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 4293 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 4294 : AANoCaptureImpl(IRP, A) {} 4295 4296 /// See AbstractAttribute::trackStatistics() 4297 void trackStatistics() const override { 4298 STATS_DECLTRACK_CSRET_ATTR(nocapture) 4299 } 4300 }; 4301 4302 /// ------------------ Value Simplify Attribute ---------------------------- 4303 struct AAValueSimplifyImpl : AAValueSimplify { 4304 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 4305 : AAValueSimplify(IRP, A) {} 4306 4307 /// See AbstractAttribute::initialize(...). 4308 void initialize(Attributor &A) override { 4309 if (getAssociatedValue().getType()->isVoidTy()) 4310 indicatePessimisticFixpoint(); 4311 } 4312 4313 /// See AbstractAttribute::getAsStr(). 4314 const std::string getAsStr() const override { 4315 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") 4316 : "not-simple"; 4317 } 4318 4319 /// See AbstractAttribute::trackStatistics() 4320 void trackStatistics() const override {} 4321 4322 /// See AAValueSimplify::getAssumedSimplifiedValue() 4323 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 4324 if (!getAssumed()) 4325 return const_cast<Value *>(&getAssociatedValue()); 4326 return SimplifiedAssociatedValue; 4327 } 4328 4329 /// Helper function for querying AAValueSimplify and updating candicate. 
4330 /// \param QueryingValue Value trying to unify with SimplifiedValue 4331 /// \param AccumulatedSimplifiedValue Current simplification result. 4332 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 4333 Value &QueryingValue, 4334 Optional<Value *> &AccumulatedSimplifiedValue) { 4335 // FIXME: Add a typecast support. 4336 4337 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( 4338 QueryingAA, IRPosition::value(QueryingValue)); 4339 4340 Optional<Value *> QueryingValueSimplified = 4341 ValueSimplifyAA.getAssumedSimplifiedValue(A); 4342 4343 if (!QueryingValueSimplified.hasValue()) 4344 return true; 4345 4346 if (!QueryingValueSimplified.getValue()) 4347 return false; 4348 4349 Value &QueryingValueSimplifiedUnwrapped = 4350 *QueryingValueSimplified.getValue(); 4351 4352 if (AccumulatedSimplifiedValue.hasValue() && 4353 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && 4354 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4355 return AccumulatedSimplifiedValue == QueryingValueSimplified; 4356 if (AccumulatedSimplifiedValue.hasValue() && 4357 isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4358 return true; 4359 4360 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue 4361 << " is assumed to be " 4362 << QueryingValueSimplifiedUnwrapped << "\n"); 4363 4364 AccumulatedSimplifiedValue = QueryingValueSimplified; 4365 return true; 4366 } 4367 4368 bool askSimplifiedValueForAAValueConstantRange(Attributor &A) { 4369 if (!getAssociatedValue().getType()->isIntegerTy()) 4370 return false; 4371 4372 const auto &ValueConstantRangeAA = 4373 A.getAAFor<AAValueConstantRange>(*this, getIRPosition()); 4374 4375 Optional<ConstantInt *> COpt = 4376 ValueConstantRangeAA.getAssumedConstantInt(A); 4377 if (COpt.hasValue()) { 4378 if (auto *C = COpt.getValue()) 4379 SimplifiedAssociatedValue = C; 4380 else 4381 return false; 4382 } else { 4383 SimplifiedAssociatedValue = llvm::None; 4384 } 4385 return true; 4386 } 4387 4388 /// See AbstractAttribute::manifest(...). 4389 ChangeStatus manifest(Attributor &A) override { 4390 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4391 4392 if (SimplifiedAssociatedValue.hasValue() && 4393 !SimplifiedAssociatedValue.getValue()) 4394 return Changed; 4395 4396 Value &V = getAssociatedValue(); 4397 auto *C = SimplifiedAssociatedValue.hasValue() 4398 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4399 : UndefValue::get(V.getType()); 4400 if (C) { 4401 // We can replace the AssociatedValue with the constant. 4402 if (!V.user_empty() && &V != C && V.getType() == C->getType()) { 4403 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C 4404 << " :: " << *this << "\n"); 4405 if (A.changeValueAfterManifest(V, *C)) 4406 Changed = ChangeStatus::CHANGED; 4407 } 4408 } 4409 4410 return Changed | AAValueSimplify::manifest(A); 4411 } 4412 4413 /// See AbstractState::indicatePessimisticFixpoint(...). 4414 ChangeStatus indicatePessimisticFixpoint() override { 4415 // NOTE: Associated value will be returned in a pessimistic fixpoint and is 4416 // regarded as known. That's why`indicateOptimisticFixpoint` is called. 4417 SimplifiedAssociatedValue = &getAssociatedValue(); 4418 indicateOptimisticFixpoint(); 4419 return ChangeStatus::CHANGED; 4420 } 4421 4422 protected: 4423 // An assumed simplified value. Initially, it is set to Optional::None, which 4424 // means that the value is not clear under current assumption. 
  // If in the pessimistic state, getAssumedSimplifiedValue doesn't return
  // this value but returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAnchorScope() || getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
                 Attribute::StructRet, Attribute::Nest},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();

    // FIXME: This is a hack to prevent us from propagating function pointers
    //        in the new pass manager CGSCC pass as it creates call edges the
    //        CallGraphUpdater cannot handle yet.
    Value &V = getAssociatedValue();
    if (V.getType()->isPointerTy() &&
        V.getType()->getPointerElementType()->isFunctionTy() &&
        !A.isModulePass())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
      if (!MemAA.isAssumedReadOnly())
        return indicatePessimisticFixpoint();
    }

    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getArgNo());
      // Check if a corresponding argument was found or if it is not associated
      // (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // We can only propagate thread independent values through callbacks.
      // This is different to direct/indirect call sites because for them we
      // know the thread executing the caller and callee is the same. For
      // callbacks this is not guaranteed, thus a thread dependent value could
      // be different for the caller and callee, making it invalid to propagate.
      Value &ArgOp = ACSArgPos.getAssociatedValue();
      if (ACS.isCallbackCall())
        if (auto *C = dyn_cast<Constant>(&ArgOp))
          if (C->isThreadDependent())
            return false;
      return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(PredForCallSite, *this, true,
                                AllCallSitesKnown))
      if (!askSimplifiedValueForAAValueConstantRange(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ?
ChangeStatus::UNCHANGED 4498 : ChangeStatus ::CHANGED; 4499 } 4500 4501 /// See AbstractAttribute::trackStatistics() 4502 void trackStatistics() const override { 4503 STATS_DECLTRACK_ARG_ATTR(value_simplify) 4504 } 4505 }; 4506 4507 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 4508 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 4509 : AAValueSimplifyImpl(IRP, A) {} 4510 4511 /// See AbstractAttribute::updateImpl(...). 4512 ChangeStatus updateImpl(Attributor &A) override { 4513 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4514 4515 auto PredForReturned = [&](Value &V) { 4516 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4517 }; 4518 4519 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 4520 if (!askSimplifiedValueForAAValueConstantRange(A)) 4521 return indicatePessimisticFixpoint(); 4522 4523 // If a candicate was found in this update, return CHANGED. 4524 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4525 ? ChangeStatus::UNCHANGED 4526 : ChangeStatus ::CHANGED; 4527 } 4528 4529 ChangeStatus manifest(Attributor &A) override { 4530 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4531 4532 if (SimplifiedAssociatedValue.hasValue() && 4533 !SimplifiedAssociatedValue.getValue()) 4534 return Changed; 4535 4536 Value &V = getAssociatedValue(); 4537 auto *C = SimplifiedAssociatedValue.hasValue() 4538 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4539 : UndefValue::get(V.getType()); 4540 if (C) { 4541 auto PredForReturned = 4542 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 4543 // We can replace the AssociatedValue with the constant. 4544 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) 4545 return true; 4546 4547 for (ReturnInst *RI : RetInsts) { 4548 if (RI->getFunction() != getAnchorScope()) 4549 continue; 4550 auto *RC = C; 4551 if (RC->getType() != RI->getReturnValue()->getType()) 4552 RC = ConstantExpr::getBitCast(RC, 4553 RI->getReturnValue()->getType()); 4554 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC 4555 << " in " << *RI << " :: " << *this << "\n"); 4556 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) 4557 Changed = ChangeStatus::CHANGED; 4558 } 4559 return true; 4560 }; 4561 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 4562 } 4563 4564 return Changed | AAValueSimplify::manifest(A); 4565 } 4566 4567 /// See AbstractAttribute::trackStatistics() 4568 void trackStatistics() const override { 4569 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 4570 } 4571 }; 4572 4573 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 4574 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 4575 : AAValueSimplifyImpl(IRP, A) {} 4576 4577 /// See AbstractAttribute::initialize(...). 4578 void initialize(Attributor &A) override { 4579 // FIXME: This might have exposed a SCC iterator update bug in the old PM. 4580 // Needs investigation. 4581 // AAValueSimplifyImpl::initialize(A); 4582 Value &V = getAnchorValue(); 4583 4584 // TODO: add other stuffs 4585 if (isa<Constant>(V)) 4586 indicatePessimisticFixpoint(); 4587 } 4588 4589 /// See AbstractAttribute::updateImpl(...). 
4590 ChangeStatus updateImpl(Attributor &A) override { 4591 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4592 4593 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, 4594 bool Stripped) -> bool { 4595 auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V)); 4596 if (!Stripped && this == &AA) { 4597 // TODO: Look the instruction and check recursively. 4598 4599 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V 4600 << "\n"); 4601 return false; 4602 } 4603 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4604 }; 4605 4606 bool Dummy = false; 4607 if (!genericValueTraversal<AAValueSimplify, bool>( 4608 A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(), 4609 /* UseValueSimplify */ false)) 4610 if (!askSimplifiedValueForAAValueConstantRange(A)) 4611 return indicatePessimisticFixpoint(); 4612 4613 // If a candicate was found in this update, return CHANGED. 4614 4615 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4616 ? ChangeStatus::UNCHANGED 4617 : ChangeStatus ::CHANGED; 4618 } 4619 4620 /// See AbstractAttribute::trackStatistics() 4621 void trackStatistics() const override { 4622 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 4623 } 4624 }; 4625 4626 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 4627 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 4628 : AAValueSimplifyImpl(IRP, A) {} 4629 4630 /// See AbstractAttribute::initialize(...). 4631 void initialize(Attributor &A) override { 4632 SimplifiedAssociatedValue = &getAnchorValue(); 4633 indicateOptimisticFixpoint(); 4634 } 4635 /// See AbstractAttribute::initialize(...). 4636 ChangeStatus updateImpl(Attributor &A) override { 4637 llvm_unreachable( 4638 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 4639 } 4640 /// See AbstractAttribute::trackStatistics() 4641 void trackStatistics() const override { 4642 STATS_DECLTRACK_FN_ATTR(value_simplify) 4643 } 4644 }; 4645 4646 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 4647 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 4648 : AAValueSimplifyFunction(IRP, A) {} 4649 /// See AbstractAttribute::trackStatistics() 4650 void trackStatistics() const override { 4651 STATS_DECLTRACK_CS_ATTR(value_simplify) 4652 } 4653 }; 4654 4655 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned { 4656 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 4657 : AAValueSimplifyReturned(IRP, A) {} 4658 4659 /// See AbstractAttribute::manifest(...). 
4660 ChangeStatus manifest(Attributor &A) override { 4661 return AAValueSimplifyImpl::manifest(A); 4662 } 4663 4664 void trackStatistics() const override { 4665 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 4666 } 4667 }; 4668 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 4669 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 4670 : AAValueSimplifyFloating(IRP, A) {} 4671 4672 void trackStatistics() const override { 4673 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 4674 } 4675 }; 4676 4677 /// ----------------------- Heap-To-Stack Conversion --------------------------- 4678 struct AAHeapToStackImpl : public AAHeapToStack { 4679 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) 4680 : AAHeapToStack(IRP, A) {} 4681 4682 const std::string getAsStr() const override { 4683 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); 4684 } 4685 4686 ChangeStatus manifest(Attributor &A) override { 4687 assert(getState().isValidState() && 4688 "Attempted to manifest an invalid state!"); 4689 4690 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 4691 Function *F = getAnchorScope(); 4692 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4693 4694 for (Instruction *MallocCall : MallocCalls) { 4695 // This malloc cannot be replaced. 4696 if (BadMallocCalls.count(MallocCall)) 4697 continue; 4698 4699 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { 4700 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 4701 A.deleteAfterManifest(*FreeCall); 4702 HasChanged = ChangeStatus::CHANGED; 4703 } 4704 4705 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall 4706 << "\n"); 4707 4708 Align Alignment; 4709 Constant *Size; 4710 if (isCallocLikeFn(MallocCall, TLI)) { 4711 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); 4712 auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); 4713 APInt TotalSize = SizeT->getValue() * Num->getValue(); 4714 Size = 4715 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); 4716 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { 4717 Size = cast<ConstantInt>(MallocCall->getOperand(1)); 4718 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) 4719 ->getValue() 4720 .getZExtValue()) 4721 .valueOrOne(); 4722 } else { 4723 Size = cast<ConstantInt>(MallocCall->getOperand(0)); 4724 } 4725 4726 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); 4727 Instruction *AI = 4728 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 4729 "", MallocCall->getNextNode()); 4730 4731 if (AI->getType() != MallocCall->getType()) 4732 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", 4733 AI->getNextNode()); 4734 4735 A.changeValueAfterManifest(*MallocCall, *AI); 4736 4737 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { 4738 auto *NBB = II->getNormalDest(); 4739 BranchInst::Create(NBB, MallocCall->getParent()); 4740 A.deleteAfterManifest(*MallocCall); 4741 } else { 4742 A.deleteAfterManifest(*MallocCall); 4743 } 4744 4745 // Zero out the allocated memory if it was a calloc. 
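// Illustrative sketch (hypothetical IR, assuming a typical data layout):
//   %p = call i8* @calloc(i64 4, i64 8)
// was turned into an alloca of 4 * 8 = 32 bytes above; the code below adds a
// matching memset to preserve calloc's zero-initialization:
//   %p = alloca i8, i64 32
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i1 false)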
4746 if (isCallocLikeFn(MallocCall, TLI)) { 4747 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc", 4748 AI->getNextNode()); 4749 Value *Ops[] = { 4750 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 4751 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 4752 4753 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()}; 4754 Module *M = F->getParent(); 4755 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 4756 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 4757 } 4758 HasChanged = ChangeStatus::CHANGED; 4759 } 4760 4761 return HasChanged; 4762 } 4763 4764 /// Collection of all malloc calls in a function. 4765 SmallSetVector<Instruction *, 4> MallocCalls; 4766 4767 /// Collection of malloc calls that cannot be converted. 4768 DenseSet<const Instruction *> BadMallocCalls; 4769 4770 /// A map for each malloc call to the set of associated free calls. 4771 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc; 4772 4773 ChangeStatus updateImpl(Attributor &A) override; 4774 }; 4775 4776 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) { 4777 const Function *F = getAnchorScope(); 4778 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4779 4780 MustBeExecutedContextExplorer &Explorer = 4781 A.getInfoCache().getMustBeExecutedContextExplorer(); 4782 4783 auto FreeCheck = [&](Instruction &I) { 4784 const auto &Frees = FreesForMalloc.lookup(&I); 4785 if (Frees.size() != 1) 4786 return false; 4787 Instruction *UniqueFree = *Frees.begin(); 4788 return Explorer.findInContextOf(UniqueFree, I.getNextNode()); 4789 }; 4790 4791 auto UsesCheck = [&](Instruction &I) { 4792 bool ValidUsesOnly = true; 4793 bool MustUse = true; 4794 auto Pred = [&](const Use &U, bool &Follow) -> bool { 4795 Instruction *UserI = cast<Instruction>(U.getUser()); 4796 if (isa<LoadInst>(UserI)) 4797 return true; 4798 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 4799 if (SI->getValueOperand() == U.get()) { 4800 LLVM_DEBUG(dbgs() 4801 << "[H2S] escaping store to memory: " << *UserI << "\n"); 4802 ValidUsesOnly = false; 4803 } else { 4804 // A store into the malloc'ed memory is fine. 4805 } 4806 return true; 4807 } 4808 if (auto *CB = dyn_cast<CallBase>(UserI)) { 4809 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 4810 return true; 4811 // Record malloc. 4812 if (isFreeCall(UserI, TLI)) { 4813 if (MustUse) { 4814 FreesForMalloc[&I].insert(UserI); 4815 } else { 4816 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: " 4817 << *UserI << "\n"); 4818 ValidUsesOnly = false; 4819 } 4820 return true; 4821 } 4822 4823 unsigned ArgNo = CB->getArgOperandNo(&U); 4824 4825 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 4826 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4827 4828 // If a callsite argument use is nofree, we are fine. 4829 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 4830 *this, IRPosition::callsite_argument(*CB, ArgNo)); 4831 4832 if (!NoCaptureAA.isAssumedNoCapture() || 4833 !ArgNoFreeAA.isAssumedNoFree()) { 4834 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 4835 ValidUsesOnly = false; 4836 } 4837 return true; 4838 } 4839 4840 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 4841 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 4842 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI)); 4843 Follow = true; 4844 return true; 4845 } 4846 // Unknown user for which we can not track uses further (in a way that 4847 // makes sense). 
4848 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 4849 ValidUsesOnly = false; 4850 return true; 4851 }; 4852 A.checkForAllUses(Pred, *this, I); 4853 return ValidUsesOnly; 4854 }; 4855 4856 auto MallocCallocCheck = [&](Instruction &I) { 4857 if (BadMallocCalls.count(&I)) 4858 return true; 4859 4860 bool IsMalloc = isMallocLikeFn(&I, TLI); 4861 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); 4862 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); 4863 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { 4864 BadMallocCalls.insert(&I); 4865 return true; 4866 } 4867 4868 if (IsMalloc) { 4869 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) 4870 if (Size->getValue().ule(MaxHeapToStackSize)) 4871 if (UsesCheck(I) || FreeCheck(I)) { 4872 MallocCalls.insert(&I); 4873 return true; 4874 } 4875 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { 4876 // Only if the alignment and sizes are constant. 4877 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4878 if (Size->getValue().ule(MaxHeapToStackSize)) 4879 if (UsesCheck(I) || FreeCheck(I)) { 4880 MallocCalls.insert(&I); 4881 return true; 4882 } 4883 } else if (IsCalloc) { 4884 bool Overflow = false; 4885 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) 4886 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 4887 if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) 4888 .ule(MaxHeapToStackSize)) 4889 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { 4890 MallocCalls.insert(&I); 4891 return true; 4892 } 4893 } 4894 4895 BadMallocCalls.insert(&I); 4896 return true; 4897 }; 4898 4899 size_t NumBadMallocs = BadMallocCalls.size(); 4900 4901 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); 4902 4903 if (NumBadMallocs != BadMallocCalls.size()) 4904 return ChangeStatus::CHANGED; 4905 4906 return ChangeStatus::UNCHANGED; 4907 } 4908 4909 struct AAHeapToStackFunction final : public AAHeapToStackImpl { 4910 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 4911 : AAHeapToStackImpl(IRP, A) {} 4912 4913 /// See AbstractAttribute::trackStatistics(). 4914 void trackStatistics() const override { 4915 STATS_DECL( 4916 MallocCalls, Function, 4917 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 4918 for (auto *C : MallocCalls) 4919 if (!BadMallocCalls.count(C)) 4920 ++BUILD_STAT_NAME(MallocCalls, Function); 4921 } 4922 }; 4923 4924 /// ----------------------- Privatizable Pointers ------------------------------ 4925 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 4926 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 4927 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 4928 4929 ChangeStatus indicatePessimisticFixpoint() override { 4930 AAPrivatizablePtr::indicatePessimisticFixpoint(); 4931 PrivatizableType = nullptr; 4932 return ChangeStatus::CHANGED; 4933 } 4934 4935 /// Identify the type we can chose for a private copy of the underlying 4936 /// argument. None means it is not clear yet, nullptr means there is none. 4937 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 4938 4939 /// Return a privatizable type that encloses both T0 and T1. 4940 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
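// Put differently, for the stub below (values are Optional<Type *>, where
// None means "not clear yet" and nullptr means "there is none"):
//   combineTypes(None, T )  == T        // one side still unknown
//   combineTypes(T,    T )  == T        // all call sites agree
//   combineTypes(T0,   T1)  == nullptr  // T0 != T1, no common type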
4941 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 4942 if (!T0.hasValue()) 4943 return T1; 4944 if (!T1.hasValue()) 4945 return T0; 4946 if (T0 == T1) 4947 return T0; 4948 return nullptr; 4949 } 4950 4951 Optional<Type *> getPrivatizableType() const override { 4952 return PrivatizableType; 4953 } 4954 4955 const std::string getAsStr() const override { 4956 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 4957 } 4958 4959 protected: 4960 Optional<Type *> PrivatizableType; 4961 }; 4962 4963 // TODO: Do this for call site arguments (probably also other values) as well. 4964 4965 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 4966 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 4967 : AAPrivatizablePtrImpl(IRP, A) {} 4968 4969 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 4970 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 4971 // If this is a byval argument and we know all the call sites (so we can 4972 // rewrite them), there is no need to check them explicitly. 4973 bool AllCallSitesKnown; 4974 if (getIRPosition().hasAttr(Attribute::ByVal) && 4975 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 4976 true, AllCallSitesKnown)) 4977 return getAssociatedValue().getType()->getPointerElementType(); 4978 4979 Optional<Type *> Ty; 4980 unsigned ArgNo = getIRPosition().getArgNo(); 4981 4982 // Make sure the associated call site argument has the same type at all call 4983 // sites and it is an allocation we know is safe to privatize, for now that 4984 // means we only allow alloca instructions. 4985 // TODO: We can additionally analyze the accesses in the callee to create 4986 // the type from that information instead. That is a little more 4987 // involved and will be done in a follow up patch. 4988 auto CallSiteCheck = [&](AbstractCallSite ACS) { 4989 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 4990 // Check if a coresponding argument was found or if it is one not 4991 // associated (which can happen for callback calls). 4992 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 4993 return false; 4994 4995 // Check that all call sites agree on a type. 4996 auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); 4997 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 4998 4999 LLVM_DEBUG({ 5000 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 5001 if (CSTy.hasValue() && CSTy.getValue()) 5002 CSTy.getValue()->print(dbgs()); 5003 else if (CSTy.hasValue()) 5004 dbgs() << "<nullptr>"; 5005 else 5006 dbgs() << "<none>"; 5007 }); 5008 5009 Ty = combineTypes(Ty, CSTy); 5010 5011 LLVM_DEBUG({ 5012 dbgs() << " : New Type: "; 5013 if (Ty.hasValue() && Ty.getValue()) 5014 Ty.getValue()->print(dbgs()); 5015 else if (Ty.hasValue()) 5016 dbgs() << "<nullptr>"; 5017 else 5018 dbgs() << "<none>"; 5019 dbgs() << "\n"; 5020 }); 5021 5022 return !Ty.hasValue() || Ty.getValue(); 5023 }; 5024 5025 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) 5026 return nullptr; 5027 return Ty; 5028 } 5029 5030 /// See AbstractAttribute::updateImpl(...). 
5031 ChangeStatus updateImpl(Attributor &A) override { 5032 PrivatizableType = identifyPrivatizableType(A); 5033 if (!PrivatizableType.hasValue()) 5034 return ChangeStatus::UNCHANGED; 5035 if (!PrivatizableType.getValue()) 5036 return indicatePessimisticFixpoint(); 5037 5038 // The dependence is optional so we don't give up once we give up on the 5039 // alignment. 5040 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 5041 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5042 5043 // Avoid arguments with padding for now. 5044 if (!getIRPosition().hasAttr(Attribute::ByVal) && 5045 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 5046 A.getInfoCache().getDL())) { 5047 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 5048 return indicatePessimisticFixpoint(); 5049 } 5050 5051 // Verify callee and caller agree on how the promoted argument would be 5052 // passed. 5053 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a 5054 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible 5055 // which doesn't require the arguments ArgumentPromotion wanted to pass. 5056 Function &Fn = *getIRPosition().getAnchorScope(); 5057 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; 5058 ArgsToPromote.insert(getAssociatedArgument()); 5059 const auto *TTI = 5060 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 5061 if (!TTI || 5062 !ArgumentPromotionPass::areFunctionArgsABICompatible( 5063 Fn, *TTI, ArgsToPromote, Dummy) || 5064 ArgsToPromote.empty()) { 5065 LLVM_DEBUG( 5066 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 5067 << Fn.getName() << "\n"); 5068 return indicatePessimisticFixpoint(); 5069 } 5070 5071 // Collect the types that will replace the privatizable type in the function 5072 // signature. 5073 SmallVector<Type *, 16> ReplacementTypes; 5074 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5075 5076 // Register a rewrite of the argument. 5077 Argument *Arg = getAssociatedArgument(); 5078 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 5079 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 5080 return indicatePessimisticFixpoint(); 5081 } 5082 5083 unsigned ArgNo = Arg->getArgNo(); 5084 5085 // Helper to check if for the given call site the associated argument is 5086 // passed to a callback where the privatization would be different. 
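// Illustrative scenario (hypothetical, callback encoding as used by, e.g.,
// OpenMP's __kmpc_fork_call): the direct call site that passes our argument
// may itself be a broker call whose callback metadata forwards the same
// operand to a callback callee. Privatization is only compatible if that
// callback callee's matching argument agrees on the privatizable type, which
// the lambda below verifies for every callback use of the call site.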
5087 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { 5088 SmallVector<const Use *, 4> CallbackUses; 5089 AbstractCallSite::getCallbackUses(CB, CallbackUses); 5090 for (const Use *U : CallbackUses) { 5091 AbstractCallSite CBACS(U); 5092 assert(CBACS && CBACS.isCallbackCall()); 5093 for (Argument &CBArg : CBACS.getCalledFunction()->args()) { 5094 int CBArgNo = CBACS.getCallArgOperandNo(CBArg); 5095 5096 LLVM_DEBUG({ 5097 dbgs() 5098 << "[AAPrivatizablePtr] Argument " << *Arg 5099 << "check if can be privatized in the context of its parent (" 5100 << Arg->getParent()->getName() 5101 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5102 "callback (" 5103 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5104 << ")\n[AAPrivatizablePtr] " << CBArg << " : " 5105 << CBACS.getCallArgOperand(CBArg) << " vs " 5106 << CB.getArgOperand(ArgNo) << "\n" 5107 << "[AAPrivatizablePtr] " << CBArg << " : " 5108 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; 5109 }); 5110 5111 if (CBArgNo != int(ArgNo)) 5112 continue; 5113 const auto &CBArgPrivAA = 5114 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg)); 5115 if (CBArgPrivAA.isValidState()) { 5116 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); 5117 if (!CBArgPrivTy.hasValue()) 5118 continue; 5119 if (CBArgPrivTy.getValue() == PrivatizableType) 5120 continue; 5121 } 5122 5123 LLVM_DEBUG({ 5124 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5125 << " cannot be privatized in the context of its parent (" 5126 << Arg->getParent()->getName() 5127 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5128 "callback (" 5129 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5130 << ").\n[AAPrivatizablePtr] for which the argument " 5131 "privatization is not compatible.\n"; 5132 }); 5133 return false; 5134 } 5135 } 5136 return true; 5137 }; 5138 5139 // Helper to check if for the given call site the associated argument is 5140 // passed to a direct call where the privatization would be different. 
5141 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { 5142 CallBase *DC = cast<CallBase>(ACS.getInstruction()); 5143 int DCArgNo = ACS.getCallArgOperandNo(ArgNo); 5144 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && 5145 "Expected a direct call operand for callback call operand"); 5146 5147 LLVM_DEBUG({ 5148 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5149 << " check if be privatized in the context of its parent (" 5150 << Arg->getParent()->getName() 5151 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5152 "direct call of (" 5153 << DCArgNo << "@" << DC->getCalledFunction()->getName() 5154 << ").\n"; 5155 }); 5156 5157 Function *DCCallee = DC->getCalledFunction(); 5158 if (unsigned(DCArgNo) < DCCallee->arg_size()) { 5159 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( 5160 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo))); 5161 if (DCArgPrivAA.isValidState()) { 5162 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); 5163 if (!DCArgPrivTy.hasValue()) 5164 return true; 5165 if (DCArgPrivTy.getValue() == PrivatizableType) 5166 return true; 5167 } 5168 } 5169 5170 LLVM_DEBUG({ 5171 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5172 << " cannot be privatized in the context of its parent (" 5173 << Arg->getParent()->getName() 5174 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5175 "direct call of (" 5176 << ACS.getInstruction()->getCalledFunction()->getName() 5177 << ").\n[AAPrivatizablePtr] for which the argument " 5178 "privatization is not compatible.\n"; 5179 }); 5180 return false; 5181 }; 5182 5183 // Helper to check if the associated argument is used at the given abstract 5184 // call site in a way that is incompatible with the privatization assumed 5185 // here. 5186 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { 5187 if (ACS.isDirectCall()) 5188 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); 5189 if (ACS.isCallbackCall()) 5190 return IsCompatiblePrivArgOfDirectCS(ACS); 5191 return false; 5192 }; 5193 5194 bool AllCallSitesKnown; 5195 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true, 5196 AllCallSitesKnown)) 5197 return indicatePessimisticFixpoint(); 5198 5199 return ChangeStatus::UNCHANGED; 5200 } 5201 5202 /// Given a type to private \p PrivType, collect the constituates (which are 5203 /// used) in \p ReplacementTypes. 5204 static void 5205 identifyReplacementTypes(Type *PrivType, 5206 SmallVectorImpl<Type *> &ReplacementTypes) { 5207 // TODO: For now we expand the privatization type to the fullest which can 5208 // lead to dead arguments that need to be removed later. 5209 assert(PrivType && "Expected privatizable type!"); 5210 5211 // Traverse the type, extract constituate types on the outermost level. 5212 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5213 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) 5214 ReplacementTypes.push_back(PrivStructType->getElementType(u)); 5215 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5216 ReplacementTypes.append(PrivArrayType->getNumElements(), 5217 PrivArrayType->getElementType()); 5218 } else { 5219 ReplacementTypes.push_back(PrivType); 5220 } 5221 } 5222 5223 /// Initialize \p Base according to the type \p PrivType at position \p IP. 5224 /// The values needed are taken from the arguments of \p F starting at 5225 /// position \p ArgNo. 
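// Illustrative sketch (hypothetical type, assuming a typical data layout):
// for PrivType == { i32, i64 } the helper below emits, at \p IP,
//   store i32 %arg.N,   <gep Base, offset 0>
//   store i64 %arg.N+1, <gep Base, offset 8>
// i.e. one store per outermost struct/array element, consuming consecutive
// arguments starting at \p ArgNo; a non-aggregate type gets a single store.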
5226 static void createInitialization(Type *PrivType, Value &Base, Function &F, 5227 unsigned ArgNo, Instruction &IP) { 5228 assert(PrivType && "Expected privatizable type!"); 5229 5230 IRBuilder<NoFolder> IRB(&IP); 5231 const DataLayout &DL = F.getParent()->getDataLayout(); 5232 5233 // Traverse the type, build GEPs and stores. 5234 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5235 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5236 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5237 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 5238 Value *Ptr = constructPointer( 5239 PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL); 5240 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5241 } 5242 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5243 Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo(); 5244 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy); 5245 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5246 Value *Ptr = 5247 constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL); 5248 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5249 } 5250 } else { 5251 new StoreInst(F.getArg(ArgNo), &Base, &IP); 5252 } 5253 } 5254 5255 /// Extract values from \p Base according to the type \p PrivType at the 5256 /// call position \p ACS. The values are appended to \p ReplacementValues. 5257 void createReplacementValues(Align Alignment, Type *PrivType, 5258 AbstractCallSite ACS, Value *Base, 5259 SmallVectorImpl<Value *> &ReplacementValues) { 5260 assert(Base && "Expected base value!"); 5261 assert(PrivType && "Expected privatizable type!"); 5262 Instruction *IP = ACS.getInstruction(); 5263 5264 IRBuilder<NoFolder> IRB(IP); 5265 const DataLayout &DL = IP->getModule()->getDataLayout(); 5266 5267 if (Base->getType()->getPointerElementType() != PrivType) 5268 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 5269 "", ACS.getInstruction()); 5270 5271 // Traverse the type, build GEPs and loads. 5272 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5273 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5274 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5275 Type *PointeeTy = PrivStructType->getElementType(u); 5276 Value *Ptr = 5277 constructPointer(PointeeTy->getPointerTo(), Base, 5278 PrivStructLayout->getElementOffset(u), IRB, DL); 5279 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 5280 L->setAlignment(Alignment); 5281 ReplacementValues.push_back(L); 5282 } 5283 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5284 Type *PointeeTy = PrivArrayType->getElementType(); 5285 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 5286 Type *PointeePtrTy = PointeeTy->getPointerTo(); 5287 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5288 Value *Ptr = 5289 constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL); 5290 LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP); 5291 L->setAlignment(Alignment); 5292 ReplacementValues.push_back(L); 5293 } 5294 } else { 5295 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 5296 L->setAlignment(Alignment); 5297 ReplacementValues.push_back(L); 5298 } 5299 } 5300 5301 /// See AbstractAttribute::manifest(...) 
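// Illustrative overall effect (hypothetical signature; assumes a two-member
// struct is privatized):
//   before: define void @fn(%pair* byval %p)   ...   call void @fn(%pair* %x)
//   after:  define void @fn(i32 %p.0, i64 %p.1), with a fresh "p.priv"
//           alloca rebuilt in the entry block from the new arguments, while
//           each call site loads the members of %x and passes them instead.
// The two callbacks registered in the manifest below implement these two
// halves of the rewrite.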
5302 ChangeStatus manifest(Attributor &A) override { 5303 if (!PrivatizableType.hasValue()) 5304 return ChangeStatus::UNCHANGED; 5305 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 5306 5307 // Collect all tail calls in the function as we cannot allow new allocas to 5308 // escape into tail recursion. 5309 // TODO: Be smarter about new allocas escaping into tail calls. 5310 SmallVector<CallInst *, 16> TailCalls; 5311 if (!A.checkForAllInstructions( 5312 [&](Instruction &I) { 5313 CallInst &CI = cast<CallInst>(I); 5314 if (CI.isTailCall()) 5315 TailCalls.push_back(&CI); 5316 return true; 5317 }, 5318 *this, {Instruction::Call})) 5319 return ChangeStatus::UNCHANGED; 5320 5321 Argument *Arg = getAssociatedArgument(); 5322 // Query AAAlign attribute for alignment of associated argument to 5323 // determine the best alignment of loads. 5324 const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg)); 5325 5326 // Callback to repair the associated function. A new alloca is placed at the 5327 // beginning and initialized with the values passed through arguments. The 5328 // new alloca replaces the use of the old pointer argument. 5329 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 5330 [=](const Attributor::ArgumentReplacementInfo &ARI, 5331 Function &ReplacementFn, Function::arg_iterator ArgIt) { 5332 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 5333 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 5334 auto *AI = new AllocaInst(PrivatizableType.getValue(), 0, 5335 Arg->getName() + ".priv", IP); 5336 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 5337 ArgIt->getArgNo(), *IP); 5338 Arg->replaceAllUsesWith(AI); 5339 5340 for (CallInst *CI : TailCalls) 5341 CI->setTailCall(false); 5342 }; 5343 5344 // Callback to repair a call site of the associated function. The elements 5345 // of the privatizable type are loaded prior to the call and passed to the 5346 // new function version. 5347 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 5348 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 5349 AbstractCallSite ACS, 5350 SmallVectorImpl<Value *> &NewArgOperands) { 5351 // When no alignment is specified for the load instruction, 5352 // natural alignment is assumed. 5353 createReplacementValues( 5354 assumeAligned(AlignAA.getAssumedAlign()), 5355 PrivatizableType.getValue(), ACS, 5356 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 5357 NewArgOperands); 5358 }; 5359 5360 // Collect the types that will replace the privatizable type in the function 5361 // signature. 5362 SmallVector<Type *, 16> ReplacementTypes; 5363 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5364 5365 // Register a rewrite of the argument. 5366 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 5367 std::move(FnRepairCB), 5368 std::move(ACSRepairCB))) 5369 return ChangeStatus::CHANGED; 5370 return ChangeStatus::UNCHANGED; 5371 } 5372 5373 /// See AbstractAttribute::trackStatistics() 5374 void trackStatistics() const override { 5375 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 5376 } 5377 }; 5378 5379 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 5380 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 5381 : AAPrivatizablePtrImpl(IRP, A) {} 5382 5383 /// See AbstractAttribute::initialize(...). 5384 virtual void initialize(Attributor &A) override { 5385 // TODO: We can privatize more than arguments. 
5386 indicatePessimisticFixpoint(); 5387 } 5388 5389 ChangeStatus updateImpl(Attributor &A) override { 5390 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 5391 "updateImpl will not be called"); 5392 } 5393 5394 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5395 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5396 Value *Obj = 5397 GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL()); 5398 if (!Obj) { 5399 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 5400 return nullptr; 5401 } 5402 5403 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 5404 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 5405 if (CI->isOne()) 5406 return Obj->getType()->getPointerElementType(); 5407 if (auto *Arg = dyn_cast<Argument>(Obj)) { 5408 auto &PrivArgAA = 5409 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg)); 5410 if (PrivArgAA.isAssumedPrivatizablePtr()) 5411 return Obj->getType()->getPointerElementType(); 5412 } 5413 5414 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 5415 "alloca nor privatizable argument: " 5416 << *Obj << "!\n"); 5417 return nullptr; 5418 } 5419 5420 /// See AbstractAttribute::trackStatistics() 5421 void trackStatistics() const override { 5422 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 5423 } 5424 }; 5425 5426 struct AAPrivatizablePtrCallSiteArgument final 5427 : public AAPrivatizablePtrFloating { 5428 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 5429 : AAPrivatizablePtrFloating(IRP, A) {} 5430 5431 /// See AbstractAttribute::initialize(...). 5432 void initialize(Attributor &A) override { 5433 if (getIRPosition().hasAttr(Attribute::ByVal)) 5434 indicateOptimisticFixpoint(); 5435 } 5436 5437 /// See AbstractAttribute::updateImpl(...). 5438 ChangeStatus updateImpl(Attributor &A) override { 5439 PrivatizableType = identifyPrivatizableType(A); 5440 if (!PrivatizableType.hasValue()) 5441 return ChangeStatus::UNCHANGED; 5442 if (!PrivatizableType.getValue()) 5443 return indicatePessimisticFixpoint(); 5444 5445 const IRPosition &IRP = getIRPosition(); 5446 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP); 5447 if (!NoCaptureAA.isAssumedNoCapture()) { 5448 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 5449 return indicatePessimisticFixpoint(); 5450 } 5451 5452 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP); 5453 if (!NoAliasAA.isAssumedNoAlias()) { 5454 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 5455 return indicatePessimisticFixpoint(); 5456 } 5457 5458 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP); 5459 if (!MemBehaviorAA.isAssumedReadOnly()) { 5460 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 5461 return indicatePessimisticFixpoint(); 5462 } 5463 5464 return ChangeStatus::UNCHANGED; 5465 } 5466 5467 /// See AbstractAttribute::trackStatistics() 5468 void trackStatistics() const override { 5469 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 5470 } 5471 }; 5472 5473 struct AAPrivatizablePtrCallSiteReturned final 5474 : public AAPrivatizablePtrFloating { 5475 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 5476 : AAPrivatizablePtrFloating(IRP, A) {} 5477 5478 /// See AbstractAttribute::initialize(...). 5479 void initialize(Attributor &A) override { 5480 // TODO: We can privatize more than arguments. 
5481 indicatePessimisticFixpoint(); 5482 } 5483 5484 /// See AbstractAttribute::trackStatistics() 5485 void trackStatistics() const override { 5486 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 5487 } 5488 }; 5489 5490 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 5491 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 5492 : AAPrivatizablePtrFloating(IRP, A) {} 5493 5494 /// See AbstractAttribute::initialize(...). 5495 void initialize(Attributor &A) override { 5496 // TODO: We can privatize more than arguments. 5497 indicatePessimisticFixpoint(); 5498 } 5499 5500 /// See AbstractAttribute::trackStatistics() 5501 void trackStatistics() const override { 5502 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 5503 } 5504 }; 5505 5506 /// -------------------- Memory Behavior Attributes ---------------------------- 5507 /// Includes read-none, read-only, and write-only. 5508 /// ---------------------------------------------------------------------------- 5509 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 5510 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 5511 : AAMemoryBehavior(IRP, A) {} 5512 5513 /// See AbstractAttribute::initialize(...). 5514 void initialize(Attributor &A) override { 5515 intersectAssumedBits(BEST_STATE); 5516 getKnownStateFromValue(getIRPosition(), getState()); 5517 IRAttribute::initialize(A); 5518 } 5519 5520 /// Return the memory behavior information encoded in the IR for \p IRP. 5521 static void getKnownStateFromValue(const IRPosition &IRP, 5522 BitIntegerState &State, 5523 bool IgnoreSubsumingPositions = false) { 5524 SmallVector<Attribute, 2> Attrs; 5525 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 5526 for (const Attribute &Attr : Attrs) { 5527 switch (Attr.getKindAsEnum()) { 5528 case Attribute::ReadNone: 5529 State.addKnownBits(NO_ACCESSES); 5530 break; 5531 case Attribute::ReadOnly: 5532 State.addKnownBits(NO_WRITES); 5533 break; 5534 case Attribute::WriteOnly: 5535 State.addKnownBits(NO_READS); 5536 break; 5537 default: 5538 llvm_unreachable("Unexpected attribute!"); 5539 } 5540 } 5541 5542 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 5543 if (!I->mayReadFromMemory()) 5544 State.addKnownBits(NO_READS); 5545 if (!I->mayWriteToMemory()) 5546 State.addKnownBits(NO_WRITES); 5547 } 5548 } 5549 5550 /// See AbstractAttribute::getDeducedAttributes(...). 5551 void getDeducedAttributes(LLVMContext &Ctx, 5552 SmallVectorImpl<Attribute> &Attrs) const override { 5553 assert(Attrs.size() == 0); 5554 if (isAssumedReadNone()) 5555 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 5556 else if (isAssumedReadOnly()) 5557 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 5558 else if (isAssumedWriteOnly()) 5559 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 5560 assert(Attrs.size() <= 1); 5561 } 5562 5563 /// See AbstractAttribute::manifest(...). 5564 ChangeStatus manifest(Attributor &A) override { 5565 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 5566 return ChangeStatus::UNCHANGED; 5567 5568 const IRPosition &IRP = getIRPosition(); 5569 5570 // Check if we would improve the existing attributes first. 
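// For example, if the position already carries `readonly` and we again only
// deduced `readonly`, nothing improves and we return UNCHANGED below rather
// than dropping and re-adding attributes.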
5571 SmallVector<Attribute, 4> DeducedAttrs; 5572 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 5573 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 5574 return IRP.hasAttr(Attr.getKindAsEnum(), 5575 /* IgnoreSubsumingPositions */ true); 5576 })) 5577 return ChangeStatus::UNCHANGED; 5578 5579 // Clear existing attributes. 5580 IRP.removeAttrs(AttrKinds); 5581 5582 // Use the generic manifest method. 5583 return IRAttribute::manifest(A); 5584 } 5585 5586 /// See AbstractState::getAsStr(). 5587 const std::string getAsStr() const override { 5588 if (isAssumedReadNone()) 5589 return "readnone"; 5590 if (isAssumedReadOnly()) 5591 return "readonly"; 5592 if (isAssumedWriteOnly()) 5593 return "writeonly"; 5594 return "may-read/write"; 5595 } 5596 5597 /// The set of IR attributes AAMemoryBehavior deals with. 5598 static const Attribute::AttrKind AttrKinds[3]; 5599 }; 5600 5601 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 5602 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 5603 5604 /// Memory behavior attribute for a floating value. 5605 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 5606 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 5607 : AAMemoryBehaviorImpl(IRP, A) {} 5608 5609 /// See AbstractAttribute::initialize(...). 5610 void initialize(Attributor &A) override { 5611 AAMemoryBehaviorImpl::initialize(A); 5612 // Initialize the use vector with all direct uses of the associated value. 5613 for (const Use &U : getAssociatedValue().uses()) 5614 Uses.insert(&U); 5615 } 5616 5617 /// See AbstractAttribute::updateImpl(...). 5618 ChangeStatus updateImpl(Attributor &A) override; 5619 5620 /// See AbstractAttribute::trackStatistics() 5621 void trackStatistics() const override { 5622 if (isAssumedReadNone()) 5623 STATS_DECLTRACK_FLOATING_ATTR(readnone) 5624 else if (isAssumedReadOnly()) 5625 STATS_DECLTRACK_FLOATING_ATTR(readonly) 5626 else if (isAssumedWriteOnly()) 5627 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 5628 } 5629 5630 private: 5631 /// Return true if users of \p UserI might access the underlying 5632 /// variable/location described by \p U and should therefore be analyzed. 5633 bool followUsersOfUseIn(Attributor &A, const Use *U, 5634 const Instruction *UserI); 5635 5636 /// Update the state according to the effect of use \p U in \p UserI. 5637 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI); 5638 5639 protected: 5640 /// Container for (transitive) uses of the associated argument. 5641 SetVector<const Use *> Uses; 5642 }; 5643 5644 /// Memory behavior attribute for function argument. 5645 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 5646 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 5647 : AAMemoryBehaviorFloating(IRP, A) {} 5648 5649 /// See AbstractAttribute::initialize(...). 5650 void initialize(Attributor &A) override { 5651 intersectAssumedBits(BEST_STATE); 5652 const IRPosition &IRP = getIRPosition(); 5653 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 5654 // can query it when we use has/getAttr. That would allow us to reuse the 5655 // initialize of the base class here. 5656 bool HasByVal = 5657 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 5658 getKnownStateFromValue(IRP, getState(), 5659 /* IgnoreSubsumingPositions */ HasByVal); 5660 5661 // Initialize the use vector with all direct uses of the associated value. 
5662 Argument *Arg = getAssociatedArgument(); 5663 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) { 5664 indicatePessimisticFixpoint(); 5665 } else { 5666 // Initialize the use vector with all direct uses of the associated value. 5667 for (const Use &U : Arg->uses()) 5668 Uses.insert(&U); 5669 } 5670 } 5671 5672 ChangeStatus manifest(Attributor &A) override { 5673 // TODO: Pointer arguments are not supported on vectors of pointers yet. 5674 if (!getAssociatedValue().getType()->isPointerTy()) 5675 return ChangeStatus::UNCHANGED; 5676 5677 // TODO: From readattrs.ll: "inalloca parameters are always 5678 // considered written" 5679 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 5680 removeKnownBits(NO_WRITES); 5681 removeAssumedBits(NO_WRITES); 5682 } 5683 return AAMemoryBehaviorFloating::manifest(A); 5684 } 5685 5686 /// See AbstractAttribute::trackStatistics() 5687 void trackStatistics() const override { 5688 if (isAssumedReadNone()) 5689 STATS_DECLTRACK_ARG_ATTR(readnone) 5690 else if (isAssumedReadOnly()) 5691 STATS_DECLTRACK_ARG_ATTR(readonly) 5692 else if (isAssumedWriteOnly()) 5693 STATS_DECLTRACK_ARG_ATTR(writeonly) 5694 } 5695 }; 5696 5697 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 5698 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 5699 : AAMemoryBehaviorArgument(IRP, A) {} 5700 5701 /// See AbstractAttribute::initialize(...). 5702 void initialize(Attributor &A) override { 5703 if (Argument *Arg = getAssociatedArgument()) { 5704 if (Arg->hasByValAttr()) { 5705 addKnownBits(NO_WRITES); 5706 removeKnownBits(NO_READS); 5707 removeAssumedBits(NO_READS); 5708 } 5709 } 5710 AAMemoryBehaviorArgument::initialize(A); 5711 } 5712 5713 /// See AbstractAttribute::updateImpl(...). 5714 ChangeStatus updateImpl(Attributor &A) override { 5715 // TODO: Once we have call site specific value information we can provide 5716 // call site specific liveness liveness information and then it makes 5717 // sense to specialize attributes for call sites arguments instead of 5718 // redirecting requests to the callee argument. 5719 Argument *Arg = getAssociatedArgument(); 5720 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5721 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos); 5722 return clampStateAndIndicateChange( 5723 getState(), 5724 static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState())); 5725 } 5726 5727 /// See AbstractAttribute::trackStatistics() 5728 void trackStatistics() const override { 5729 if (isAssumedReadNone()) 5730 STATS_DECLTRACK_CSARG_ATTR(readnone) 5731 else if (isAssumedReadOnly()) 5732 STATS_DECLTRACK_CSARG_ATTR(readonly) 5733 else if (isAssumedWriteOnly()) 5734 STATS_DECLTRACK_CSARG_ATTR(writeonly) 5735 } 5736 }; 5737 5738 /// Memory behavior attribute for a call site return position. 5739 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { 5740 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) 5741 : AAMemoryBehaviorFloating(IRP, A) {} 5742 5743 /// See AbstractAttribute::manifest(...). 5744 ChangeStatus manifest(Attributor &A) override { 5745 // We do not annotate returned values. 5746 return ChangeStatus::UNCHANGED; 5747 } 5748 5749 /// See AbstractAttribute::trackStatistics() 5750 void trackStatistics() const override {} 5751 }; 5752 5753 /// An AA to represent the memory behavior function attributes. 
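// Illustrative outcome (hypothetical function): if all reachable read/write
// instructions are loads (or calls to assumed-readonly callees), the function
// ends up `readonly`; if it neither reads nor writes, it ends up `readnone`,
// in which case the manifest below also drops argmemonly-style attributes
// that are now subsumed.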
5754 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { 5755 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) 5756 : AAMemoryBehaviorImpl(IRP, A) {} 5757 5758 /// See AbstractAttribute::updateImpl(Attributor &A). 5759 virtual ChangeStatus updateImpl(Attributor &A) override; 5760 5761 /// See AbstractAttribute::manifest(...). 5762 ChangeStatus manifest(Attributor &A) override { 5763 Function &F = cast<Function>(getAnchorValue()); 5764 if (isAssumedReadNone()) { 5765 F.removeFnAttr(Attribute::ArgMemOnly); 5766 F.removeFnAttr(Attribute::InaccessibleMemOnly); 5767 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); 5768 } 5769 return AAMemoryBehaviorImpl::manifest(A); 5770 } 5771 5772 /// See AbstractAttribute::trackStatistics() 5773 void trackStatistics() const override { 5774 if (isAssumedReadNone()) 5775 STATS_DECLTRACK_FN_ATTR(readnone) 5776 else if (isAssumedReadOnly()) 5777 STATS_DECLTRACK_FN_ATTR(readonly) 5778 else if (isAssumedWriteOnly()) 5779 STATS_DECLTRACK_FN_ATTR(writeonly) 5780 } 5781 }; 5782 5783 /// AAMemoryBehavior attribute for call sites. 5784 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl { 5785 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) 5786 : AAMemoryBehaviorImpl(IRP, A) {} 5787 5788 /// See AbstractAttribute::initialize(...). 5789 void initialize(Attributor &A) override { 5790 AAMemoryBehaviorImpl::initialize(A); 5791 Function *F = getAssociatedFunction(); 5792 if (!F || !A.isFunctionIPOAmendable(*F)) { 5793 indicatePessimisticFixpoint(); 5794 return; 5795 } 5796 } 5797 5798 /// See AbstractAttribute::updateImpl(...). 5799 ChangeStatus updateImpl(Attributor &A) override { 5800 // TODO: Once we have call site specific value information we can provide 5801 // call site specific liveness liveness information and then it makes 5802 // sense to specialize attributes for call sites arguments instead of 5803 // redirecting requests to the callee argument. 5804 Function *F = getAssociatedFunction(); 5805 const IRPosition &FnPos = IRPosition::function(*F); 5806 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos); 5807 return clampStateAndIndicateChange( 5808 getState(), 5809 static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState())); 5810 } 5811 5812 /// See AbstractAttribute::trackStatistics() 5813 void trackStatistics() const override { 5814 if (isAssumedReadNone()) 5815 STATS_DECLTRACK_CS_ATTR(readnone) 5816 else if (isAssumedReadOnly()) 5817 STATS_DECLTRACK_CS_ATTR(readonly) 5818 else if (isAssumedWriteOnly()) 5819 STATS_DECLTRACK_CS_ATTR(writeonly) 5820 } 5821 }; 5822 5823 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { 5824 5825 // The current assumed state used to determine a change. 5826 auto AssumedState = getAssumed(); 5827 5828 auto CheckRWInst = [&](Instruction &I) { 5829 // If the instruction has an own memory behavior state, use it to restrict 5830 // the local state. No further analysis is required as the other memory 5831 // state is as optimistic as it gets. 5832 if (const auto *CB = dyn_cast<CallBase>(&I)) { 5833 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 5834 *this, IRPosition::callsite_function(*CB)); 5835 intersectAssumedBits(MemBehaviorAA.getAssumed()); 5836 return !isAtFixpoint(); 5837 } 5838 5839 // Remove access kind modifiers if necessary. 
5840 if (I.mayReadFromMemory()) 5841 removeAssumedBits(NO_READS); 5842 if (I.mayWriteToMemory()) 5843 removeAssumedBits(NO_WRITES); 5844 return !isAtFixpoint(); 5845 }; 5846 5847 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 5848 return indicatePessimisticFixpoint(); 5849 5850 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED 5851 : ChangeStatus::UNCHANGED; 5852 } 5853 5854 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) { 5855 5856 const IRPosition &IRP = getIRPosition(); 5857 const IRPosition &FnPos = IRPosition::function_scope(IRP); 5858 AAMemoryBehavior::StateType &S = getState(); 5859 5860 // First, check the function scope. We take the known information and we avoid 5861 // work if the assumed information implies the current assumed information for 5862 // this attribute. This is a valid for all but byval arguments. 5863 Argument *Arg = IRP.getAssociatedArgument(); 5864 AAMemoryBehavior::base_t FnMemAssumedState = 5865 AAMemoryBehavior::StateType::getWorstState(); 5866 if (!Arg || !Arg->hasByValAttr()) { 5867 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>( 5868 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5869 FnMemAssumedState = FnMemAA.getAssumed(); 5870 S.addKnownBits(FnMemAA.getKnown()); 5871 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed()) 5872 return ChangeStatus::UNCHANGED; 5873 } 5874 5875 // Make sure the value is not captured (except through "return"), if 5876 // it is, any information derived would be irrelevant anyway as we cannot 5877 // check the potential aliases introduced by the capture. However, no need 5878 // to fall back to anythign less optimistic than the function state. 5879 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 5880 *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 5881 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 5882 S.intersectAssumedBits(FnMemAssumedState); 5883 return ChangeStatus::CHANGED; 5884 } 5885 5886 // The current assumed state used to determine a change. 5887 auto AssumedState = S.getAssumed(); 5888 5889 // Liveness information to exclude dead users. 5890 // TODO: Take the FnPos once we have call site specific liveness information. 5891 const auto &LivenessAA = A.getAAFor<AAIsDead>( 5892 *this, IRPosition::function(*IRP.getAssociatedFunction()), 5893 /* TrackDependence */ false); 5894 5895 // Visit and expand uses until all are analyzed or a fixpoint is reached. 5896 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) { 5897 const Use *U = Uses[i]; 5898 Instruction *UserI = cast<Instruction>(U->getUser()); 5899 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI 5900 << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA)) 5901 << "]\n"); 5902 if (A.isAssumedDead(*U, this, &LivenessAA)) 5903 continue; 5904 5905 // Droppable users, e.g., llvm::assume does not actually perform any action. 5906 if (UserI->isDroppable()) 5907 continue; 5908 5909 // Check if the users of UserI should also be visited. 5910 if (followUsersOfUseIn(A, U, UserI)) 5911 for (const Use &UserIUse : UserI->uses()) 5912 Uses.insert(&UserIUse); 5913 5914 // If UserI might touch memory we analyze the use in detail. 5915 if (UserI->mayReadOrWriteMemory()) 5916 analyzeUseIn(A, U, UserI); 5917 } 5918 5919 return (AssumedState != getAssumed()) ? 
ChangeStatus::CHANGED 5920 : ChangeStatus::UNCHANGED; 5921 } 5922 5923 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U, 5924 const Instruction *UserI) { 5925 // The loaded value is unrelated to the pointer argument, no need to 5926 // follow the users of the load. 5927 if (isa<LoadInst>(UserI)) 5928 return false; 5929 5930 // By default we follow all uses assuming UserI might leak information on U, 5931 // we have special handling for call sites operands though. 5932 const auto *CB = dyn_cast<CallBase>(UserI); 5933 if (!CB || !CB->isArgOperand(U)) 5934 return true; 5935 5936 // If the use is a call argument known not to be captured, the users of 5937 // the call do not need to be visited because they have to be unrelated to 5938 // the input. Note that this check is not trivial even though we disallow 5939 // general capturing of the underlying argument. The reason is that the 5940 // call might the argument "through return", which we allow and for which we 5941 // need to check call users. 5942 if (U->get()->getType()->isPointerTy()) { 5943 unsigned ArgNo = CB->getArgOperandNo(U); 5944 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 5945 *this, IRPosition::callsite_argument(*CB, ArgNo), 5946 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5947 return !ArgNoCaptureAA.isAssumedNoCapture(); 5948 } 5949 5950 return true; 5951 } 5952 5953 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U, 5954 const Instruction *UserI) { 5955 assert(UserI->mayReadOrWriteMemory()); 5956 5957 switch (UserI->getOpcode()) { 5958 default: 5959 // TODO: Handle all atomics and other side-effect operations we know of. 5960 break; 5961 case Instruction::Load: 5962 // Loads cause the NO_READS property to disappear. 5963 removeAssumedBits(NO_READS); 5964 return; 5965 5966 case Instruction::Store: 5967 // Stores cause the NO_WRITES property to disappear if the use is the 5968 // pointer operand. Note that we do assume that capturing was taken care of 5969 // somewhere else. 5970 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get()) 5971 removeAssumedBits(NO_WRITES); 5972 return; 5973 5974 case Instruction::Call: 5975 case Instruction::CallBr: 5976 case Instruction::Invoke: { 5977 // For call sites we look at the argument memory behavior attribute (this 5978 // could be recursive!) in order to restrict our own state. 5979 const auto *CB = cast<CallBase>(UserI); 5980 5981 // Give up on operand bundles. 5982 if (CB->isBundleOperand(U)) { 5983 indicatePessimisticFixpoint(); 5984 return; 5985 } 5986 5987 // Calling a function does read the function pointer, maybe write it if the 5988 // function is self-modifying. 5989 if (CB->isCallee(U)) { 5990 removeAssumedBits(NO_READS); 5991 break; 5992 } 5993 5994 // Adjust the possible access behavior based on the information on the 5995 // argument. 5996 IRPosition Pos; 5997 if (U->get()->getType()->isPointerTy()) 5998 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U)); 5999 else 6000 Pos = IRPosition::callsite_function(*CB); 6001 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6002 *this, Pos, 6003 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6004 // "assumed" has at most the same bits as the MemBehaviorAA assumed 6005 // and at least "known". 6006 intersectAssumedBits(MemBehaviorAA.getAssumed()); 6007 return; 6008 } 6009 }; 6010 6011 // Generally, look at the "may-properties" and adjust the assumed state if we 6012 // did not trigger special handling before. 
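// For instance, an atomicrmw or cmpxchg user is not special-cased above and
// both reads and writes memory, so NO_READS and NO_WRITES are both removed
// here.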
6013 if (UserI->mayReadFromMemory()) 6014 removeAssumedBits(NO_READS); 6015 if (UserI->mayWriteToMemory()) 6016 removeAssumedBits(NO_WRITES); 6017 } 6018 6019 } // namespace 6020 6021 /// -------------------- Memory Locations Attributes --------------------------- 6022 /// Includes read-none, argmemonly, inaccessiblememonly, 6023 /// inaccessiblememorargmemonly 6024 /// ---------------------------------------------------------------------------- 6025 6026 std::string AAMemoryLocation::getMemoryLocationsAsStr( 6027 AAMemoryLocation::MemoryLocationsKind MLK) { 6028 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 6029 return "all memory"; 6030 if (MLK == AAMemoryLocation::NO_LOCATIONS) 6031 return "no memory"; 6032 std::string S = "memory:"; 6033 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 6034 S += "stack,"; 6035 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 6036 S += "constant,"; 6037 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 6038 S += "internal global,"; 6039 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 6040 S += "external global,"; 6041 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 6042 S += "argument,"; 6043 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 6044 S += "inaccessible,"; 6045 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 6046 S += "malloced,"; 6047 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 6048 S += "unknown,"; 6049 S.pop_back(); 6050 return S; 6051 } 6052 6053 namespace { 6054 struct AAMemoryLocationImpl : public AAMemoryLocation { 6055 6056 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 6057 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 6058 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6059 AccessKind2Accesses[u] = nullptr; 6060 } 6061 6062 ~AAMemoryLocationImpl() { 6063 // The AccessSets are allocated via a BumpPtrAllocator, we call 6064 // the destructor manually. 6065 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6066 if (AccessKind2Accesses[u]) 6067 AccessKind2Accesses[u]->~AccessSet(); 6068 } 6069 6070 /// See AbstractAttribute::initialize(...). 6071 void initialize(Attributor &A) override { 6072 intersectAssumedBits(BEST_STATE); 6073 getKnownStateFromValue(A, getIRPosition(), getState()); 6074 IRAttribute::initialize(A); 6075 } 6076 6077 /// Return the memory behavior information encoded in the IR for \p IRP. 6078 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 6079 BitIntegerState &State, 6080 bool IgnoreSubsumingPositions = false) { 6081 // For internal functions we ignore `argmemonly` and 6082 // `inaccessiblememorargmemonly` as we might break it via interprocedural 6083 // constant propagation. It is unclear if this is the best way but it is 6084 // unlikely this will cause real performance problems. If we are deriving 6085 // attributes for the anchor function we even remove the attribute in 6086 // addition to ignoring it. 
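// Illustrative reason (hypothetical): if an internal function is argmemonly
// and interprocedural constant propagation later replaces a pointer argument
// with a global, former "argument memory" accesses become global accesses
// and the attribute would be wrong; hence such attributes are only trusted
// for anchors without local linkage and are otherwise dropped below.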
  /// Return the memory location information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break them via interprocedural
    // constant propagation. It is unclear if this is the best way but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
    bool UseArgMemOnly = true;
    Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && A.isRunOn(*AnchorFn))
      UseArgMemOnly = !AnchorFn->hasLocalLinkage();

    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
        break;
      case Attribute::InaccessibleMemOnly:
        State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
        break;
      case Attribute::ArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::ArgMemOnly});
        break;
      case Attribute::InaccessibleMemOrArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(
              NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone()) {
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
      if (isAssumedInaccessibleMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
      else if (isAssumedArgMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
      else if (isAssumedInaccessibleOrArgMemOnly())
        Attrs.push_back(
            Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
    }
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);
    if (isAssumedReadNone())
      IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

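  // The attribute chosen by getDeducedAttributes() above follows a strict
  // strength order; a rough, standalone sketch of that ladder (the attribute
  // names are the real IR spellings, the booleans are placeholders):
  //
  //   const char *pickMemLocationAttr(bool ReadNone, bool InaccOnly,
  //                                   bool ArgOnly, bool InaccOrArgOnly) {
  //     if (ReadNone)       return "readnone";
  //     if (InaccOnly)      return "inaccessiblememonly";
  //     if (ArgOnly)        return "argmemonly";
  //     if (InaccOrArgOnly) return "inaccessiblemem_or_argmemonly";
  //     return nullptr; // nothing better than what is already there
  //   }
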
  /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint, this instruction
    // becomes an access for all potential access kinds.
    // TODO: Add pointers for argmemonly and globals to improve the results of
    //       checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];

  /// Return the kind(s) of location that may be accessed by \p V.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }

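  // The per-kind bookkeeping below relies on every single location kind being
  // a power of two: iterating "CurMLK *= 2" visits each kind once, and the
  // log2 of the kind is its slot in AccessKind2Accesses. A self-contained
  // sketch of the indexing (the bound 1 << 8 is illustrative, not the real
  // NO_LOCATIONS value):
  //
  //   for (uint32_t CurMLK = 1; CurMLK < (1u << 8); CurMLK *= 2) {
  //     unsigned Idx = llvm::Log2_32(CurMLK); // 0, 1, 2, ...
  //     // AccessKind2Accesses[Idx] holds the accesses recorded for CurMLK.
  //   }
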
  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  auto StripGEPCB = [](Value *V) -> Value * {
    auto *GEP = dyn_cast<GEPOperator>(V);
    while (GEP) {
      V = GEP->getPointerOperand();
      GEP = dyn_cast<GEPOperator>(V);
    }
    return V;
  };

  auto VisitValueCB = [&](Value &V, const Instruction *,
                          AAMemoryLocation::StateType &T,
                          bool Stripped) -> bool {
    MemoryLocationsKind MLK = NO_LOCATIONS;
    assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
    if (isa<UndefValue>(V))
      return true;
    if (auto *Arg = dyn_cast<Argument>(&V)) {
      if (Arg->hasByValAttr())
        MLK = NO_LOCAL_MEM;
      else
        MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(V) &&
               !NullPointerIsDefined(getAssociatedFunction(),
                                     V.getType()->getPointerAddressSpace())) {
      return true;
    } else if (isa<AllocaInst>(V)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
      const auto &NoAliasAA =
          A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
      if (NoAliasAA.isAssumedNoAlias())
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
                              getAccessKindFromInst(&I));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value was categorized: "
                      << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
                      << "\n");
    return true;
  };

  if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
          A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
          /* UseValueSimplify */ true,
          /* MaxValues */ 32, StripGEPCB)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
  } else {
    LLVM_DEBUG(
        dbgs()
        << "[AAMemoryLocation] Accessed locations with pointer locations: "
        << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
  }
}

AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if we assume any memory accesses are visible.
    const auto &CBMemLocationAA =
        A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");

    if (CBMemLocationAA.isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA.getAssumedNotAccessedLocation();

    // Set the argmemonly and global bits as we handle them separately below.
    uint32_t CBAssumedNotAccessedLocsNoArgMem =
        CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;

    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
      if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
        continue;
      updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
                                getAccessKindFromInst(&I));
    }

    // Now handle global memory if it might be accessed. This is slightly
    // tricky as NO_GLOBAL_MEM has multiple bits set.
    bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
    if (HasGlobalAccesses) {
      auto AccessPred = [&](const Instruction *, const Value *Ptr,
                            AccessKind Kind, MemoryLocationsKind MLK) {
        updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
                                  getAccessKindFromInst(&I));
        return true;
      };
      if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
              AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
        return AccessedLocs.getWorstState();
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    // Now handle argument memory if it might be accessed.
    bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
    if (HasArgAccesses) {
      for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
           ++ArgNo) {

        // Skip non-pointer arguments.
        const Value *ArgOp = CB->getArgOperand(ArgNo);
        if (!ArgOp->getType()->isPtrOrPtrVectorTy())
          continue;

        // Skip readnone arguments.
        const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
        const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
            *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);

        if (ArgOpMemLocationAA.isAssumedReadNone())
          continue;

        // Categorize potentially accessed pointer arguments as if there were
        // an access instruction with them as the pointer operand.
        categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
      }
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    return AccessedLocs.getAssumed();
  }

  if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
               << I << " [" << *Ptr << "]\n");
    categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
    return AccessedLocs.getAssumed();
  }

  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
                    << I << "\n");
  updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
                            getAccessKindFromInst(&I));
  return AccessedLocs.getAssumed();
}

/// An AA to represent the memory location function attributes.
struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
  AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, getIRPosition(), /* TrackDependence */ false);
    if (MemBehaviorAA.isAssumedReadNone()) {
      if (MemBehaviorAA.isKnownReadNone())
        return indicateOptimisticFixpoint();
      assert(isAssumedReadNone() &&
             "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    // The current assumed state used to determine a change.
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      return true;
    };

    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

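// A rough illustration of how the updateImpl() above converges: every round
// can only clear more NO_* bits (via removeAssumedBits), so the assumed set of
// untouched locations shrinks monotonically until the Attributor reaches a
// fixpoint. Hypothetical sketch of one round, outside the framework:
//
//   uint32_t Assumed = ~0u;            // stand-in for BEST_STATE (all NO_*)
//   uint32_t Before = Assumed;
//   // for each read/write instruction I in the function:
//   //   Assumed &= ~CategorizedLocationsOf(I);
//   bool Changed = Assumed != Before;  // drives another iteration if true
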
/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryLocationImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || !A.isFunctionIPOAmendable(*F)) {
      indicatePessimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};

/// ------------------ Value Constant Range Attribute -------------------------

struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

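  // The helpers below each produce a ConstantRange and the results are
  // combined by intersection (see getKnownConstantRange and initialize). A
  // minimal, self-contained example with made-up ranges:
  //
  //   ConstantRange FromSCEV(APInt(32, 0), APInt(32, 100)); // [0, 100)
  //   ConstantRange FromLVI(APInt(32, 10), APInt(32, 50));  // [10, 50)
  //   ConstantRange Combined = FromSCEV.intersectWith(FromLVI); // [10, 50)
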
  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<BasicBlock *>(CtxI->getParent()),
                                 const_cast<Instruction *>(CtxI));
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!CtxI || CtxI == getCtxI())
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumption.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
    //       evolve to x^2 + x, then we can say that y is in [2, 12].

    if (!CtxI || CtxI == getCtxI())
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range that contains the assumed range, we
    //       can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata.
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V))
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
    }

    return Changed;
  }
};

struct AAValueConstantRangeArgument final
    : AAArgumentFromCallSiteArguments<
          AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
  using Base = AAArgumentFromCallSiteArguments<
      AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
  AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_range)
  }
};

struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl> {
  using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
                                            AAValueConstantRangeImpl>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

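  // For reference, initialize() below collapses plain constants and undef to
  // trivial ranges before any fixpoint iteration happens; e.g. (made-up
  // value) a ConstantInt 7 of width 32 becomes a single-element range:
  //
  //   ConstantRange R(APInt(32, 7));               // [7, 8), i.e. exactly {7}
  //   bool IsSingle = R.isSingleElement() != nullptr; // true
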
  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;
    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We can work with PHI and select instructions as we traverse their
    // operands during update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  bool calculateBinaryOperator(
      Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }

  bool calculateCastInst(
      Attributor &A, CastInst *CastI, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non integers as well.
    Value &OpV = *CastI->getOperand(0);
    if (!OpV.getType()->isIntegerTy())
      return false;

    auto &OpAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
    QuerriedAAs.push_back(&OpAA);
    T.unionAssumed(
        OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
    return T.isValidState();
  }

  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   const Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);
    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);

    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is an empty set, we cannot decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
        CmpI->getPredicate(), RHSAARange);

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    if (SatisfyingRegion.contains(LHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // If the value is not an instruction, we ask the Attributor for the
        // AA of the value itself.
        const auto &AA =
            A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));

        // The clamp operator is not used here so that we can take the program
        // point CtxI into account.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
            /* UseValueSimplify */ false))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};

struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
  AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
};

struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
  AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
};

struct AAValueConstantRangeCallSiteReturned
    : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                     AAValueConstantRangeImpl> {
  AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                       AAValueConstantRangeImpl>(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
    if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
      if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));

    AAValueConstantRangeImpl::initialize(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_range)
  }
};
struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
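
// For readers of the macro block above: after preprocessing, an instantiation
// such as CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) expands
// to roughly the following definition (the invalid-position cases are elided):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     /* llvm_unreachable cases for all non-function, non-call-site kinds */
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }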