1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // See the Attributor.h file comment and the class descriptions in that file for 10 // more information. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Transforms/IPO/Attributor.h" 15 16 #include "llvm/ADT/SCCIterator.h" 17 #include "llvm/ADT/SmallPtrSet.h" 18 #include "llvm/ADT/Statistic.h" 19 #include "llvm/Analysis/AssumeBundleQueries.h" 20 #include "llvm/Analysis/AssumptionCache.h" 21 #include "llvm/Analysis/CaptureTracking.h" 22 #include "llvm/Analysis/LazyValueInfo.h" 23 #include "llvm/Analysis/MemoryBuiltins.h" 24 #include "llvm/Analysis/ScalarEvolution.h" 25 #include "llvm/Analysis/TargetTransformInfo.h" 26 #include "llvm/Analysis/ValueTracking.h" 27 #include "llvm/IR/IRBuilder.h" 28 #include "llvm/IR/IntrinsicInst.h" 29 #include "llvm/IR/NoFolder.h" 30 #include "llvm/Support/CommandLine.h" 31 #include "llvm/Transforms/IPO/ArgumentPromotion.h" 32 #include "llvm/Transforms/Utils/Local.h" 33 34 #include <cassert> 35 36 using namespace llvm; 37 38 #define DEBUG_TYPE "attributor" 39 40 static cl::opt<bool> ManifestInternal( 41 "attributor-manifest-internal", cl::Hidden, 42 cl::desc("Manifest Attributor internal string attributes."), 43 cl::init(false)); 44 45 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), 46 cl::Hidden); 47 48 template <> 49 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0; 50 51 static cl::opt<unsigned, true> MaxPotentialValues( 52 "attributor-max-potential-values", cl::Hidden, 53 cl::desc("Maximum number of potential values to be " 54 "tracked for each position."), 55 cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), 56 cl::init(7)); 57 58 STATISTIC(NumAAs, "Number of abstract attributes created"); 59 60 // Some helper macros to deal with statistics tracking. 61 // 62 // Usage: 63 // For simple IR attribute tracking overload trackStatistics in the abstract 64 // attribute and choose the right STATS_DECLTRACK_********* macro, 65 // e.g.,: 66 // void trackStatistics() const override { 67 // STATS_DECLTRACK_ARG_ATTR(returned) 68 // } 69 // If there is a single "increment" side one can use the macro 70 // STATS_DECLTRACK with a custom message. If there are multiple increment 71 // sides, STATS_DECL and STATS_TRACK can also be used separately. 
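// For illustration, a call such as
//   STATS_DECLTRACK_ARG_ATTR(returned)
// expands, via the macros below, to roughly:
//   { STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++NumIRArguments_returned; }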
72 // 73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \ 74 ("Number of " #TYPE " marked '" #NAME "'") 75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME 76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG); 77 #define STATS_DECL(NAME, TYPE, MSG) \ 78 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG); 79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE)); 80 #define STATS_DECLTRACK(NAME, TYPE, MSG) \ 81 { \ 82 STATS_DECL(NAME, TYPE, MSG) \ 83 STATS_TRACK(NAME, TYPE) \ 84 } 85 #define STATS_DECLTRACK_ARG_ATTR(NAME) \ 86 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME)) 87 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \ 88 STATS_DECLTRACK(NAME, CSArguments, \ 89 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME)) 90 #define STATS_DECLTRACK_FN_ATTR(NAME) \ 91 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME)) 92 #define STATS_DECLTRACK_CS_ATTR(NAME) \ 93 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME)) 94 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \ 95 STATS_DECLTRACK(NAME, FunctionReturn, \ 96 BUILD_STAT_MSG_IR_ATTR(function returns, NAME)) 97 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \ 98 STATS_DECLTRACK(NAME, CSReturn, \ 99 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME)) 100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \ 101 STATS_DECLTRACK(NAME, Floating, \ 102 ("Number of floating values known to be '" #NAME "'")) 103 104 // Specialization of the operator<< for abstract attributes subclasses. This 105 // disambiguates situations where multiple operators are applicable. 106 namespace llvm { 107 #define PIPE_OPERATOR(CLASS) \ 108 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \ 109 return OS << static_cast<const AbstractAttribute &>(AA); \ 110 } 111 112 PIPE_OPERATOR(AAIsDead) 113 PIPE_OPERATOR(AANoUnwind) 114 PIPE_OPERATOR(AANoSync) 115 PIPE_OPERATOR(AANoRecurse) 116 PIPE_OPERATOR(AAWillReturn) 117 PIPE_OPERATOR(AANoReturn) 118 PIPE_OPERATOR(AAReturnedValues) 119 PIPE_OPERATOR(AANonNull) 120 PIPE_OPERATOR(AANoAlias) 121 PIPE_OPERATOR(AADereferenceable) 122 PIPE_OPERATOR(AAAlign) 123 PIPE_OPERATOR(AANoCapture) 124 PIPE_OPERATOR(AAValueSimplify) 125 PIPE_OPERATOR(AANoFree) 126 PIPE_OPERATOR(AAHeapToStack) 127 PIPE_OPERATOR(AAReachability) 128 PIPE_OPERATOR(AAMemoryBehavior) 129 PIPE_OPERATOR(AAMemoryLocation) 130 PIPE_OPERATOR(AAValueConstantRange) 131 PIPE_OPERATOR(AAPrivatizablePtr) 132 PIPE_OPERATOR(AAUndefinedBehavior) 133 PIPE_OPERATOR(AAPotentialValues) 134 PIPE_OPERATOR(AANoUndef) 135 136 #undef PIPE_OPERATOR 137 } // namespace llvm 138 139 namespace { 140 141 static Optional<ConstantInt *> 142 getAssumedConstantInt(Attributor &A, const Value &V, 143 const AbstractAttribute &AA, 144 bool &UsedAssumedInformation) { 145 Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation); 146 if (C.hasValue()) 147 return dyn_cast_or_null<ConstantInt>(C.getValue()); 148 return llvm::None; 149 } 150 151 /// Get pointer operand of memory accessing instruction. If \p I is 152 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile, 153 /// is set to false and the instruction is volatile, return nullptr. 
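/// (Loads, stores, atomic cmpxchg, and atomic RMW instructions are the only
/// memory accesses recognized here; everything else yields nullptr.)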
154 static const Value *getPointerOperand(const Instruction *I, 155 bool AllowVolatile) { 156 if (auto *LI = dyn_cast<LoadInst>(I)) { 157 if (!AllowVolatile && LI->isVolatile()) 158 return nullptr; 159 return LI->getPointerOperand(); 160 } 161 162 if (auto *SI = dyn_cast<StoreInst>(I)) { 163 if (!AllowVolatile && SI->isVolatile()) 164 return nullptr; 165 return SI->getPointerOperand(); 166 } 167 168 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) { 169 if (!AllowVolatile && CXI->isVolatile()) 170 return nullptr; 171 return CXI->getPointerOperand(); 172 } 173 174 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) { 175 if (!AllowVolatile && RMWI->isVolatile()) 176 return nullptr; 177 return RMWI->getPointerOperand(); 178 } 179 180 return nullptr; 181 } 182 183 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and 184 /// advanced by \p Offset bytes. To aid later analysis the method tries to build 185 /// getelement pointer instructions that traverse the natural type of \p Ptr if 186 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence 187 /// through a cast to i8*. 188 /// 189 /// TODO: This could probably live somewhere more prominantly if it doesn't 190 /// already exist. 191 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset, 192 IRBuilder<NoFolder> &IRB, const DataLayout &DL) { 193 assert(Offset >= 0 && "Negative offset not supported yet!"); 194 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset 195 << "-bytes as " << *ResTy << "\n"); 196 197 // The initial type we are trying to traverse to get nice GEPs. 198 Type *Ty = Ptr->getType(); 199 200 SmallVector<Value *, 4> Indices; 201 std::string GEPName = Ptr->getName().str(); 202 while (Offset) { 203 uint64_t Idx, Rem; 204 205 if (auto *STy = dyn_cast<StructType>(Ty)) { 206 const StructLayout *SL = DL.getStructLayout(STy); 207 if (int64_t(SL->getSizeInBytes()) < Offset) 208 break; 209 Idx = SL->getElementContainingOffset(Offset); 210 assert(Idx < STy->getNumElements() && "Offset calculation error!"); 211 Rem = Offset - SL->getElementOffset(Idx); 212 Ty = STy->getElementType(Idx); 213 } else if (auto *PTy = dyn_cast<PointerType>(Ty)) { 214 Ty = PTy->getElementType(); 215 if (!Ty->isSized()) 216 break; 217 uint64_t ElementSize = DL.getTypeAllocSize(Ty); 218 assert(ElementSize && "Expected type with size!"); 219 Idx = Offset / ElementSize; 220 Rem = Offset % ElementSize; 221 } else { 222 // Non-aggregate type, we cast and make byte-wise progress now. 223 break; 224 } 225 226 LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset 227 << " Idx: " << Idx << " Rem: " << Rem << "\n"); 228 229 GEPName += "." + std::to_string(Idx); 230 Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx)); 231 Offset = Rem; 232 } 233 234 // Create a GEP if we collected indices above. 235 if (Indices.size()) 236 Ptr = IRB.CreateGEP(Ptr, Indices, GEPName); 237 238 // If an offset is left we use byte-wise adjustment. 239 if (Offset) { 240 Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()); 241 Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset), 242 GEPName + ".b" + Twine(Offset)); 243 } 244 245 // Ensure the result has the requested type. 246 Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast"); 247 248 LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n"); 249 return Ptr; 250 } 251 252 /// Recursively visit all values that might become \p IRP at some point. 
This 253 /// will be done by looking through cast instructions, selects, phis, and calls 254 /// with the "returned" attribute. Once we cannot look through the value any 255 /// further, the callback \p VisitValueCB is invoked and passed the current 256 /// value, the \p State, and a flag to indicate if we stripped anything. 257 /// Stripped means that we unpacked the value associated with \p IRP at least 258 /// once. Note that the value used for the callback may still be the value 259 /// associated with \p IRP (due to PHIs). To limit how much effort is invested, 260 /// we will never visit more values than specified by \p MaxValues. 261 template <typename AAType, typename StateTy> 262 static bool genericValueTraversal( 263 Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State, 264 function_ref<bool(Value &, const Instruction *, StateTy &, bool)> 265 VisitValueCB, 266 const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16, 267 function_ref<Value *(Value *)> StripCB = nullptr) { 268 269 const AAIsDead *LivenessAA = nullptr; 270 if (IRP.getAnchorScope()) 271 LivenessAA = &A.getAAFor<AAIsDead>( 272 QueryingAA, IRPosition::function(*IRP.getAnchorScope()), 273 /* TrackDependence */ false); 274 bool AnyDead = false; 275 276 using Item = std::pair<Value *, const Instruction *>; 277 SmallSet<Item, 16> Visited; 278 SmallVector<Item, 16> Worklist; 279 Worklist.push_back({&IRP.getAssociatedValue(), CtxI}); 280 281 int Iteration = 0; 282 do { 283 Item I = Worklist.pop_back_val(); 284 Value *V = I.first; 285 CtxI = I.second; 286 if (StripCB) 287 V = StripCB(V); 288 289 // Check if we should process the current value. To prevent endless 290 // recursion keep a record of the values we followed! 291 if (!Visited.insert(I).second) 292 continue; 293 294 // Make sure we limit the compile time for complex expressions. 295 if (Iteration++ >= MaxValues) 296 return false; 297 298 // Explicitly look through calls with a "returned" attribute if we do 299 // not have a pointer as stripPointerCasts only works on them. 300 Value *NewV = nullptr; 301 if (V->getType()->isPointerTy()) { 302 NewV = V->stripPointerCasts(); 303 } else { 304 auto *CB = dyn_cast<CallBase>(V); 305 if (CB && CB->getCalledFunction()) { 306 for (Argument &Arg : CB->getCalledFunction()->args()) 307 if (Arg.hasReturnedAttr()) { 308 NewV = CB->getArgOperand(Arg.getArgNo()); 309 break; 310 } 311 } 312 } 313 if (NewV && NewV != V) { 314 Worklist.push_back({NewV, CtxI}); 315 continue; 316 } 317 318 // Look through select instructions, visit both potential values. 319 if (auto *SI = dyn_cast<SelectInst>(V)) { 320 Worklist.push_back({SI->getTrueValue(), CtxI}); 321 Worklist.push_back({SI->getFalseValue(), CtxI}); 322 continue; 323 } 324 325 // Look through phi nodes, visit all live operands. 
326 if (auto *PHI = dyn_cast<PHINode>(V)) { 327 assert(LivenessAA && 328 "Expected liveness in the presence of instructions!"); 329 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 330 BasicBlock *IncomingBB = PHI->getIncomingBlock(u); 331 if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA, 332 LivenessAA, 333 /* CheckBBLivenessOnly */ true)) { 334 AnyDead = true; 335 continue; 336 } 337 Worklist.push_back( 338 {PHI->getIncomingValue(u), IncomingBB->getTerminator()}); 339 } 340 continue; 341 } 342 343 if (UseValueSimplify && !isa<Constant>(V)) { 344 bool UsedAssumedInformation = false; 345 Optional<Constant *> C = 346 A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation); 347 if (!C.hasValue()) 348 continue; 349 if (Value *NewV = C.getValue()) { 350 Worklist.push_back({NewV, CtxI}); 351 continue; 352 } 353 } 354 355 // Once a leaf is reached we inform the user through the callback. 356 if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) 357 return false; 358 } while (!Worklist.empty()); 359 360 // If we actually used liveness information so we have to record a dependence. 361 if (AnyDead) 362 A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL); 363 364 // All values have been visited. 365 return true; 366 } 367 368 const Value *stripAndAccumulateMinimalOffsets( 369 Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val, 370 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, 371 bool UseAssumed = false) { 372 373 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool { 374 const IRPosition &Pos = IRPosition::value(V); 375 // Only track dependence if we are going to use the assumed info. 376 const AAValueConstantRange &ValueConstantRangeAA = 377 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos, 378 /* TrackDependence */ UseAssumed); 379 ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed() 380 : ValueConstantRangeAA.getKnown(); 381 // We can only use the lower part of the range because the upper part can 382 // be higher than what the value can really be. 383 ROffset = Range.getSignedMin(); 384 return true; 385 }; 386 387 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds, 388 AttributorAnalysis); 389 } 390 391 static const Value *getMinimalBaseOfAccsesPointerOperand( 392 Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I, 393 int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) { 394 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); 395 if (!Ptr) 396 return nullptr; 397 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); 398 const Value *Base = stripAndAccumulateMinimalOffsets( 399 A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds); 400 401 BytesOffset = OffsetAPInt.getSExtValue(); 402 return Base; 403 } 404 405 static const Value * 406 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset, 407 const DataLayout &DL, 408 bool AllowNonInbounds = false) { 409 const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false); 410 if (!Ptr) 411 return nullptr; 412 413 return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL, 414 AllowNonInbounds); 415 } 416 417 /// Helper function to clamp a state \p S of type \p StateType with the 418 /// information in \p R and indicate/return if \p S did change (as-in update is 419 /// required to be run again). 
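/// A typical use in the deduction helpers below is, roughly,
///   return clampStateAndIndicateChange(this->getState(), S);
/// where S is a temporary state computed during the update; CHANGED is
/// returned only if clamping against S modified the assumed information.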
420 template <typename StateType> 421 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) { 422 auto Assumed = S.getAssumed(); 423 S ^= R; 424 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 425 : ChangeStatus::CHANGED; 426 } 427 428 /// Clamp the information known for all returned values of a function 429 /// (identified by \p QueryingAA) into \p S. 430 template <typename AAType, typename StateType = typename AAType::StateType> 431 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA, 432 StateType &S) { 433 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for " 434 << QueryingAA << " into " << S << "\n"); 435 436 assert((QueryingAA.getIRPosition().getPositionKind() == 437 IRPosition::IRP_RETURNED || 438 QueryingAA.getIRPosition().getPositionKind() == 439 IRPosition::IRP_CALL_SITE_RETURNED) && 440 "Can only clamp returned value states for a function returned or call " 441 "site returned position!"); 442 443 // Use an optional state as there might not be any return values and we want 444 // to join (IntegerState::operator&) the state of all there are. 445 Optional<StateType> T; 446 447 // Callback for each possibly returned value. 448 auto CheckReturnValue = [&](Value &RV) -> bool { 449 const IRPosition &RVPos = IRPosition::value(RV); 450 const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos); 451 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() 452 << " @ " << RVPos << "\n"); 453 const StateType &AAS = AA.getState(); 454 if (T.hasValue()) 455 *T &= AAS; 456 else 457 T = AAS; 458 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T 459 << "\n"); 460 return T->isValidState(); 461 }; 462 463 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA)) 464 S.indicatePessimisticFixpoint(); 465 else if (T.hasValue()) 466 S ^= *T; 467 } 468 469 /// Helper class for generic deduction: return value -> returned position. 470 template <typename AAType, typename BaseType, 471 typename StateType = typename BaseType::StateType> 472 struct AAReturnedFromReturnedValues : public BaseType { 473 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A) 474 : BaseType(IRP, A) {} 475 476 /// See AbstractAttribute::updateImpl(...). 477 ChangeStatus updateImpl(Attributor &A) override { 478 StateType S(StateType::getBestState(this->getState())); 479 clampReturnedValueStates<AAType, StateType>(A, *this, S); 480 // TODO: If we know we visited all returned values, thus no are assumed 481 // dead, we can take the known information from the state T. 482 return clampStateAndIndicateChange<StateType>(this->getState(), S); 483 } 484 }; 485 486 /// Clamp the information known at all call sites for a given argument 487 /// (identified by \p QueryingAA) into \p S. 488 template <typename AAType, typename StateType = typename AAType::StateType> 489 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA, 490 StateType &S) { 491 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for " 492 << QueryingAA << " into " << S << "\n"); 493 494 assert(QueryingAA.getIRPosition().getPositionKind() == 495 IRPosition::IRP_ARGUMENT && 496 "Can only clamp call site argument states for an argument position!"); 497 498 // Use an optional state as there might not be any return values and we want 499 // to join (IntegerState::operator&) the state of all there are. 500 Optional<StateType> T; 501 502 // The argument number which is also the call site argument number. 
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, and thus none are
    // assumed dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
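/// Starting from the uses already in \p Uses, users that are executed in the
/// must-be-executed context of \p CtxI are handed to the attribute's
/// followUseInMBEC hook; if the hook asks for it, the user's own uses are
/// appended to \p Uses so they get visited as well.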
577 template <class AAType, typename StateType = typename AAType::StateType> 578 static void followUsesInContext(AAType &AA, Attributor &A, 579 MustBeExecutedContextExplorer &Explorer, 580 const Instruction *CtxI, 581 SetVector<const Use *> &Uses, 582 StateType &State) { 583 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI); 584 for (unsigned u = 0; u < Uses.size(); ++u) { 585 const Use *U = Uses[u]; 586 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) { 587 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd); 588 if (Found && AA.followUseInMBEC(A, U, UserI, State)) 589 for (const Use &Us : UserI->uses()) 590 Uses.insert(&Us); 591 } 592 } 593 } 594 595 /// Use the must-be-executed-context around \p I to add information into \p S. 596 /// The AAType class is required to have `followUseInMBEC` method with the 597 /// following signature and behaviour: 598 /// 599 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I) 600 /// U - Underlying use. 601 /// I - The user of the \p U. 602 /// Returns true if the value should be tracked transitively. 603 /// 604 template <class AAType, typename StateType = typename AAType::StateType> 605 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S, 606 Instruction &CtxI) { 607 608 // Container for (transitive) uses of the associated value. 609 SetVector<const Use *> Uses; 610 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses()) 611 Uses.insert(&U); 612 613 MustBeExecutedContextExplorer &Explorer = 614 A.getInfoCache().getMustBeExecutedContextExplorer(); 615 616 followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S); 617 618 if (S.isAtFixpoint()) 619 return; 620 621 SmallVector<const BranchInst *, 4> BrInsts; 622 auto Pred = [&](const Instruction *I) { 623 if (const BranchInst *Br = dyn_cast<BranchInst>(I)) 624 if (Br->isConditional()) 625 BrInsts.push_back(Br); 626 return true; 627 }; 628 629 // Here, accumulate conditional branch instructions in the context. We 630 // explore the child paths and collect the known states. The disjunction of 631 // those states can be merged to its own state. Let ParentState_i be a state 632 // to indicate the known information for an i-th branch instruction in the 633 // context. ChildStates are created for its successors respectively. 634 // 635 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1} 636 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2} 637 // ... 638 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m} 639 // 640 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m 641 // 642 // FIXME: Currently, recursive branches are not handled. For example, we 643 // can't deduce that ptr must be dereferenced in below function. 644 // 645 // void f(int a, int c, int *ptr) { 646 // if(a) 647 // if (b) { 648 // *ptr = 0; 649 // } else { 650 // *ptr = 1; 651 // } 652 // else { 653 // if (b) { 654 // *ptr = 0; 655 // } else { 656 // *ptr = 1; 657 // } 658 // } 659 // } 660 661 Explorer.checkForAllContext(&CtxI, Pred); 662 for (const BranchInst *Br : BrInsts) { 663 StateType ParentState; 664 665 // The known state of the parent state is a conjunction of children's 666 // known states so it is initialized with a best state. 
667 ParentState.indicateOptimisticFixpoint(); 668 669 for (const BasicBlock *BB : Br->successors()) { 670 StateType ChildState; 671 672 size_t BeforeSize = Uses.size(); 673 followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState); 674 675 // Erase uses which only appear in the child. 676 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();) 677 It = Uses.erase(It); 678 679 ParentState &= ChildState; 680 } 681 682 // Use only known state. 683 S += ParentState; 684 } 685 } 686 687 /// -----------------------NoUnwind Function Attribute-------------------------- 688 689 struct AANoUnwindImpl : AANoUnwind { 690 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} 691 692 const std::string getAsStr() const override { 693 return getAssumed() ? "nounwind" : "may-unwind"; 694 } 695 696 /// See AbstractAttribute::updateImpl(...). 697 ChangeStatus updateImpl(Attributor &A) override { 698 auto Opcodes = { 699 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 700 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, 701 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; 702 703 auto CheckForNoUnwind = [&](Instruction &I) { 704 if (!I.mayThrow()) 705 return true; 706 707 if (const auto *CB = dyn_cast<CallBase>(&I)) { 708 const auto &NoUnwindAA = 709 A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB)); 710 return NoUnwindAA.isAssumedNoUnwind(); 711 } 712 return false; 713 }; 714 715 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes)) 716 return indicatePessimisticFixpoint(); 717 718 return ChangeStatus::UNCHANGED; 719 } 720 }; 721 722 struct AANoUnwindFunction final : public AANoUnwindImpl { 723 AANoUnwindFunction(const IRPosition &IRP, Attributor &A) 724 : AANoUnwindImpl(IRP, A) {} 725 726 /// See AbstractAttribute::trackStatistics() 727 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) } 728 }; 729 730 /// NoUnwind attribute deduction for a call sites. 731 struct AANoUnwindCallSite final : AANoUnwindImpl { 732 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) 733 : AANoUnwindImpl(IRP, A) {} 734 735 /// See AbstractAttribute::initialize(...). 736 void initialize(Attributor &A) override { 737 AANoUnwindImpl::initialize(A); 738 Function *F = getAssociatedFunction(); 739 if (!F) 740 indicatePessimisticFixpoint(); 741 } 742 743 /// See AbstractAttribute::updateImpl(...). 744 ChangeStatus updateImpl(Attributor &A) override { 745 // TODO: Once we have call site specific value information we can provide 746 // call site specific liveness information and then it makes 747 // sense to specialize attributes for call sites arguments instead of 748 // redirecting requests to the callee argument. 749 Function *F = getAssociatedFunction(); 750 const IRPosition &FnPos = IRPosition::function(*F); 751 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos); 752 return clampStateAndIndicateChange(getState(), FnAA.getState()); 753 } 754 755 /// See AbstractAttribute::trackStatistics() 756 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } 757 }; 758 759 /// --------------------- Function Return Values ------------------------------- 760 761 /// "Attribute" that collects all potential returned values and the return 762 /// instructions that they arise from. 763 /// 764 /// If there is a unique returned value R, the manifest method will: 765 /// - mark R with the "returned" attribute, if R is an argument. 
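/// - replace the uses of the function's call sites with R, if R is a constant
///   (see the manifest method below).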
766 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { 767 768 /// Mapping of values potentially returned by the associated function to the 769 /// return instructions that might return them. 770 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; 771 772 /// Mapping to remember the number of returned values for a call site such 773 /// that we can avoid updates if nothing changed. 774 DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA; 775 776 /// Set of unresolved calls returned by the associated function. 777 SmallSetVector<CallBase *, 4> UnresolvedCalls; 778 779 /// State flags 780 /// 781 ///{ 782 bool IsFixed = false; 783 bool IsValidState = true; 784 ///} 785 786 public: 787 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) 788 : AAReturnedValues(IRP, A) {} 789 790 /// See AbstractAttribute::initialize(...). 791 void initialize(Attributor &A) override { 792 // Reset the state. 793 IsFixed = false; 794 IsValidState = true; 795 ReturnedValues.clear(); 796 797 Function *F = getAssociatedFunction(); 798 if (!F) { 799 indicatePessimisticFixpoint(); 800 return; 801 } 802 assert(!F->getReturnType()->isVoidTy() && 803 "Did not expect a void return type!"); 804 805 // The map from instruction opcodes to those instructions in the function. 806 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); 807 808 // Look through all arguments, if one is marked as returned we are done. 809 for (Argument &Arg : F->args()) { 810 if (Arg.hasReturnedAttr()) { 811 auto &ReturnInstSet = ReturnedValues[&Arg]; 812 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) 813 for (Instruction *RI : *Insts) 814 ReturnInstSet.insert(cast<ReturnInst>(RI)); 815 816 indicateOptimisticFixpoint(); 817 return; 818 } 819 } 820 821 if (!A.isFunctionIPOAmendable(*F)) 822 indicatePessimisticFixpoint(); 823 } 824 825 /// See AbstractAttribute::manifest(...). 826 ChangeStatus manifest(Attributor &A) override; 827 828 /// See AbstractAttribute::getState(...). 829 AbstractState &getState() override { return *this; } 830 831 /// See AbstractAttribute::getState(...). 832 const AbstractState &getState() const override { return *this; } 833 834 /// See AbstractAttribute::updateImpl(Attributor &A). 835 ChangeStatus updateImpl(Attributor &A) override; 836 837 llvm::iterator_range<iterator> returned_values() override { 838 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 839 } 840 841 llvm::iterator_range<const_iterator> returned_values() const override { 842 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 843 } 844 845 const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override { 846 return UnresolvedCalls; 847 } 848 849 /// Return the number of potential return values, -1 if unknown. 850 size_t getNumReturnValues() const override { 851 return isValidState() ? ReturnedValues.size() : -1; 852 } 853 854 /// Return an assumed unique return value if a single candidate is found. If 855 /// there cannot be one, return a nullptr. If it is not clear yet, return the 856 /// Optional::NoneType. 857 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; 858 859 /// See AbstractState::checkForAllReturnedValues(...). 860 bool checkForAllReturnedValuesAndReturnInsts( 861 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 862 const override; 863 864 /// Pretty print the attribute similar to the IR representation. 
865 const std::string getAsStr() const override; 866 867 /// See AbstractState::isAtFixpoint(). 868 bool isAtFixpoint() const override { return IsFixed; } 869 870 /// See AbstractState::isValidState(). 871 bool isValidState() const override { return IsValidState; } 872 873 /// See AbstractState::indicateOptimisticFixpoint(...). 874 ChangeStatus indicateOptimisticFixpoint() override { 875 IsFixed = true; 876 return ChangeStatus::UNCHANGED; 877 } 878 879 ChangeStatus indicatePessimisticFixpoint() override { 880 IsFixed = true; 881 IsValidState = false; 882 return ChangeStatus::CHANGED; 883 } 884 }; 885 886 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { 887 ChangeStatus Changed = ChangeStatus::UNCHANGED; 888 889 // Bookkeeping. 890 assert(isValidState()); 891 STATS_DECLTRACK(KnownReturnValues, FunctionReturn, 892 "Number of function with known return values"); 893 894 // Check if we have an assumed unique return value that we could manifest. 895 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); 896 897 if (!UniqueRV.hasValue() || !UniqueRV.getValue()) 898 return Changed; 899 900 // Bookkeeping. 901 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, 902 "Number of function with unique return"); 903 904 // Callback to replace the uses of CB with the constant C. 905 auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) { 906 if (CB.use_empty()) 907 return ChangeStatus::UNCHANGED; 908 if (A.changeValueAfterManifest(CB, C)) 909 return ChangeStatus::CHANGED; 910 return ChangeStatus::UNCHANGED; 911 }; 912 913 // If the assumed unique return value is an argument, annotate it. 914 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { 915 if (UniqueRVArg->getType()->canLosslesslyBitCastTo( 916 getAssociatedFunction()->getReturnType())) { 917 getIRPosition() = IRPosition::argument(*UniqueRVArg); 918 Changed = IRAttribute::manifest(A); 919 } 920 } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) { 921 // We can replace the returned value with the unique returned constant. 922 Value &AnchorValue = getAnchorValue(); 923 if (Function *F = dyn_cast<Function>(&AnchorValue)) { 924 for (const Use &U : F->uses()) 925 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) 926 if (CB->isCallee(&U)) { 927 Constant *RVCCast = 928 CB->getType() == RVC->getType() 929 ? RVC 930 : ConstantExpr::getTruncOrBitCast(RVC, CB->getType()); 931 Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed; 932 } 933 } else { 934 assert(isa<CallBase>(AnchorValue) && 935 "Expcected a function or call base anchor!"); 936 Constant *RVCCast = 937 AnchorValue.getType() == RVC->getType() 938 ? RVC 939 : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType()); 940 Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast); 941 } 942 if (Changed == ChangeStatus::CHANGED) 943 STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn, 944 "Number of function returns replaced by constant return"); 945 } 946 947 return Changed; 948 } 949 950 const std::string AAReturnedValuesImpl::getAsStr() const { 951 return (isAtFixpoint() ? "returns(#" : "may-return(#") + 952 (isValidState() ? 
std::to_string(getNumReturnValues()) : "?") + 953 ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]"; 954 } 955 956 Optional<Value *> 957 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { 958 // If checkForAllReturnedValues provides a unique value, ignoring potential 959 // undef values that can also be present, it is assumed to be the actual 960 // return value and forwarded to the caller of this method. If there are 961 // multiple, a nullptr is returned indicating there cannot be a unique 962 // returned value. 963 Optional<Value *> UniqueRV; 964 965 auto Pred = [&](Value &RV) -> bool { 966 // If we found a second returned value and neither the current nor the saved 967 // one is an undef, there is no unique returned value. Undefs are special 968 // since we can pretend they have any value. 969 if (UniqueRV.hasValue() && UniqueRV != &RV && 970 !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) { 971 UniqueRV = nullptr; 972 return false; 973 } 974 975 // Do not overwrite a value with an undef. 976 if (!UniqueRV.hasValue() || !isa<UndefValue>(RV)) 977 UniqueRV = &RV; 978 979 return true; 980 }; 981 982 if (!A.checkForAllReturnedValues(Pred, *this)) 983 UniqueRV = nullptr; 984 985 return UniqueRV; 986 } 987 988 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( 989 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 990 const { 991 if (!isValidState()) 992 return false; 993 994 // Check all returned values but ignore call sites as long as we have not 995 // encountered an overdefined one during an update. 996 for (auto &It : ReturnedValues) { 997 Value *RV = It.first; 998 999 CallBase *CB = dyn_cast<CallBase>(RV); 1000 if (CB && !UnresolvedCalls.count(CB)) 1001 continue; 1002 1003 if (!Pred(*RV, It.second)) 1004 return false; 1005 } 1006 1007 return true; 1008 } 1009 1010 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { 1011 size_t NumUnresolvedCalls = UnresolvedCalls.size(); 1012 bool Changed = false; 1013 1014 // State used in the value traversals starting in returned values. 1015 struct RVState { 1016 // The map in which we collect return values -> return instrs. 1017 decltype(ReturnedValues) &RetValsMap; 1018 // The flag to indicate a change. 1019 bool &Changed; 1020 // The return instrs we come from. 1021 SmallSetVector<ReturnInst *, 4> RetInsts; 1022 }; 1023 1024 // Callback for a leaf value returned by the associated function. 1025 auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS, 1026 bool) -> bool { 1027 auto Size = RVS.RetValsMap[&Val].size(); 1028 RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end()); 1029 bool Inserted = RVS.RetValsMap[&Val].size() != Size; 1030 RVS.Changed |= Inserted; 1031 LLVM_DEBUG({ 1032 if (Inserted) 1033 dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val 1034 << " => " << RVS.RetInsts.size() << "\n"; 1035 }); 1036 return true; 1037 }; 1038 1039 // Helper method to invoke the generic value traversal. 1040 auto VisitReturnedValue = [&](Value &RV, RVState &RVS, 1041 const Instruction *CtxI) { 1042 IRPosition RetValPos = IRPosition::value(RV); 1043 return genericValueTraversal<AAReturnedValues, RVState>( 1044 A, RetValPos, *this, RVS, VisitValueCB, CtxI, 1045 /* UseValueSimplify */ false); 1046 }; 1047 1048 // Callback for all "return intructions" live in the associated function. 
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled, we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV,
                               SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
                      << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends; if we do not know anything about the returned call we
    // mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values, that is, if all
    // returned values can be represented in the current scope. If so, do it.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
1127 unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB]; 1128 if (NumRetAA == RetValAA.getNumReturnValues()) { 1129 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not " 1130 "changed since it was seen last\n"); 1131 return; 1132 } 1133 NumRetAA = RetValAA.getNumReturnValues(); 1134 1135 for (auto &RetValAAIt : RetValAA.returned_values()) { 1136 Value *RetVal = RetValAAIt.first; 1137 if (Argument *Arg = dyn_cast<Argument>(RetVal)) { 1138 // Arguments are mapped to call site operands and we begin the traversal 1139 // again. 1140 bool Unused = false; 1141 RVState RVS({NewRVsMap, Unused, RetValAAIt.second}); 1142 VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB); 1143 continue; 1144 } else if (isa<CallBase>(RetVal)) { 1145 // Call sites are resolved by the callee attribute over time, no need to 1146 // do anything for us. 1147 continue; 1148 } else if (isa<Constant>(RetVal)) { 1149 // Constants are valid everywhere, we can simply take them. 1150 NewRVsMap[RetVal].insert(RIs.begin(), RIs.end()); 1151 continue; 1152 } 1153 } 1154 }; 1155 1156 for (auto &It : ReturnedValues) 1157 HandleReturnValue(It.first, It.second); 1158 1159 // Because processing the new information can again lead to new return values 1160 // we have to be careful and iterate until this iteration is complete. The 1161 // idea is that we are in a stable state at the end of an update. All return 1162 // values have been handled and properly categorized. We might not update 1163 // again if we have not requested a non-fix attribute so we cannot "wait" for 1164 // the next update to analyze a new return value. 1165 while (!NewRVsMap.empty()) { 1166 auto It = std::move(NewRVsMap.back()); 1167 NewRVsMap.pop_back(); 1168 1169 assert(!It.second.empty() && "Entry does not add anything."); 1170 auto &ReturnInsts = ReturnedValues[It.first]; 1171 for (ReturnInst *RI : It.second) 1172 if (ReturnInsts.insert(RI)) { 1173 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value " 1174 << *It.first << " => " << *RI << "\n"); 1175 HandleReturnValue(It.first, ReturnInsts); 1176 Changed = true; 1177 } 1178 } 1179 1180 Changed |= (NumUnresolvedCalls != UnresolvedCalls.size()); 1181 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 1182 } 1183 1184 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl { 1185 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A) 1186 : AAReturnedValuesImpl(IRP, A) {} 1187 1188 /// See AbstractAttribute::trackStatistics() 1189 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) } 1190 }; 1191 1192 /// Returned values information for a call sites. 1193 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl { 1194 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A) 1195 : AAReturnedValuesImpl(IRP, A) {} 1196 1197 /// See AbstractAttribute::initialize(...). 1198 void initialize(Attributor &A) override { 1199 // TODO: Once we have call site specific value information we can provide 1200 // call site specific liveness information and then it makes 1201 // sense to specialize attributes for call sites instead of 1202 // redirecting requests to the callee. 1203 llvm_unreachable("Abstract attributes for returned values are not " 1204 "supported for call sites yet!"); 1205 } 1206 1207 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check whether an intrinsic is nosync. Currently
  /// only the memcpy, memmove, and memset intrinsics and their element-wise
  /// atomic variants are handled.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered, therefore
    /// nosync.
1295 case Intrinsic::memset_element_unordered_atomic: 1296 case Intrinsic::memmove_element_unordered_atomic: 1297 case Intrinsic::memcpy_element_unordered_atomic: 1298 return true; 1299 case Intrinsic::memset: 1300 case Intrinsic::memmove: 1301 case Intrinsic::memcpy: 1302 if (!cast<MemIntrinsic>(II)->isVolatile()) 1303 return true; 1304 return false; 1305 default: 1306 return false; 1307 } 1308 } 1309 return false; 1310 } 1311 1312 bool AANoSyncImpl::isVolatile(Instruction *I) { 1313 assert(!isa<CallBase>(I) && "Calls should not be checked here"); 1314 1315 switch (I->getOpcode()) { 1316 case Instruction::AtomicRMW: 1317 return cast<AtomicRMWInst>(I)->isVolatile(); 1318 case Instruction::Store: 1319 return cast<StoreInst>(I)->isVolatile(); 1320 case Instruction::Load: 1321 return cast<LoadInst>(I)->isVolatile(); 1322 case Instruction::AtomicCmpXchg: 1323 return cast<AtomicCmpXchgInst>(I)->isVolatile(); 1324 default: 1325 return false; 1326 } 1327 } 1328 1329 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { 1330 1331 auto CheckRWInstForNoSync = [&](Instruction &I) { 1332 /// We are looking for volatile instructions or Non-Relaxed atomics. 1333 /// FIXME: We should improve the handling of intrinsics. 1334 1335 if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I)) 1336 return true; 1337 1338 if (const auto *CB = dyn_cast<CallBase>(&I)) { 1339 if (CB->hasFnAttr(Attribute::NoSync)) 1340 return true; 1341 1342 const auto &NoSyncAA = 1343 A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB)); 1344 if (NoSyncAA.isAssumedNoSync()) 1345 return true; 1346 return false; 1347 } 1348 1349 if (!isVolatile(&I) && !isNonRelaxedAtomic(&I)) 1350 return true; 1351 1352 return false; 1353 }; 1354 1355 auto CheckForNoSync = [&](Instruction &I) { 1356 // At this point we handled all read/write effects and they are all 1357 // nosync, so they can be skipped. 1358 if (I.mayReadOrWriteMemory()) 1359 return true; 1360 1361 // non-convergent and readnone imply nosync. 1362 return !cast<CallBase>(I).isConvergent(); 1363 }; 1364 1365 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) || 1366 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this)) 1367 return indicatePessimisticFixpoint(); 1368 1369 return ChangeStatus::UNCHANGED; 1370 } 1371 1372 struct AANoSyncFunction final : public AANoSyncImpl { 1373 AANoSyncFunction(const IRPosition &IRP, Attributor &A) 1374 : AANoSyncImpl(IRP, A) {} 1375 1376 /// See AbstractAttribute::trackStatistics() 1377 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) } 1378 }; 1379 1380 /// NoSync attribute deduction for a call sites. 1381 struct AANoSyncCallSite final : AANoSyncImpl { 1382 AANoSyncCallSite(const IRPosition &IRP, Attributor &A) 1383 : AANoSyncImpl(IRP, A) {} 1384 1385 /// See AbstractAttribute::initialize(...). 1386 void initialize(Attributor &A) override { 1387 AANoSyncImpl::initialize(A); 1388 Function *F = getAssociatedFunction(); 1389 if (!F) 1390 indicatePessimisticFixpoint(); 1391 } 1392 1393 /// See AbstractAttribute::updateImpl(...). 1394 ChangeStatus updateImpl(Attributor &A) override { 1395 // TODO: Once we have call site specific value information we can provide 1396 // call site specific liveness information and then it makes 1397 // sense to specialize attributes for call sites arguments instead of 1398 // redirecting requests to the callee argument. 
1399 Function *F = getAssociatedFunction(); 1400 const IRPosition &FnPos = IRPosition::function(*F); 1401 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos); 1402 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1403 } 1404 1405 /// See AbstractAttribute::trackStatistics() 1406 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); } 1407 }; 1408 1409 /// ------------------------ No-Free Attributes ---------------------------- 1410 1411 struct AANoFreeImpl : public AANoFree { 1412 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} 1413 1414 /// See AbstractAttribute::updateImpl(...). 1415 ChangeStatus updateImpl(Attributor &A) override { 1416 auto CheckForNoFree = [&](Instruction &I) { 1417 const auto &CB = cast<CallBase>(I); 1418 if (CB.hasFnAttr(Attribute::NoFree)) 1419 return true; 1420 1421 const auto &NoFreeAA = 1422 A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB)); 1423 return NoFreeAA.isAssumedNoFree(); 1424 }; 1425 1426 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this)) 1427 return indicatePessimisticFixpoint(); 1428 return ChangeStatus::UNCHANGED; 1429 } 1430 1431 /// See AbstractAttribute::getAsStr(). 1432 const std::string getAsStr() const override { 1433 return getAssumed() ? "nofree" : "may-free"; 1434 } 1435 }; 1436 1437 struct AANoFreeFunction final : public AANoFreeImpl { 1438 AANoFreeFunction(const IRPosition &IRP, Attributor &A) 1439 : AANoFreeImpl(IRP, A) {} 1440 1441 /// See AbstractAttribute::trackStatistics() 1442 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) } 1443 }; 1444 1445 /// NoFree attribute deduction for a call sites. 1446 struct AANoFreeCallSite final : AANoFreeImpl { 1447 AANoFreeCallSite(const IRPosition &IRP, Attributor &A) 1448 : AANoFreeImpl(IRP, A) {} 1449 1450 /// See AbstractAttribute::initialize(...). 1451 void initialize(Attributor &A) override { 1452 AANoFreeImpl::initialize(A); 1453 Function *F = getAssociatedFunction(); 1454 if (!F) 1455 indicatePessimisticFixpoint(); 1456 } 1457 1458 /// See AbstractAttribute::updateImpl(...). 1459 ChangeStatus updateImpl(Attributor &A) override { 1460 // TODO: Once we have call site specific value information we can provide 1461 // call site specific liveness information and then it makes 1462 // sense to specialize attributes for call sites arguments instead of 1463 // redirecting requests to the callee argument. 1464 Function *F = getAssociatedFunction(); 1465 const IRPosition &FnPos = IRPosition::function(*F); 1466 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos); 1467 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1468 } 1469 1470 /// See AbstractAttribute::trackStatistics() 1471 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); } 1472 }; 1473 1474 /// NoFree attribute for floating values. 1475 struct AANoFreeFloating : AANoFreeImpl { 1476 AANoFreeFloating(const IRPosition &IRP, Attributor &A) 1477 : AANoFreeImpl(IRP, A) {} 1478 1479 /// See AbstractAttribute::trackStatistics() 1480 void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)} 1481 1482 /// See Abstract Attribute::updateImpl(...). 
1483 ChangeStatus updateImpl(Attributor &A) override { 1484 const IRPosition &IRP = getIRPosition(); 1485 1486 const auto &NoFreeAA = 1487 A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP)); 1488 if (NoFreeAA.isAssumedNoFree()) 1489 return ChangeStatus::UNCHANGED; 1490 1491 Value &AssociatedValue = getIRPosition().getAssociatedValue(); 1492 auto Pred = [&](const Use &U, bool &Follow) -> bool { 1493 Instruction *UserI = cast<Instruction>(U.getUser()); 1494 if (auto *CB = dyn_cast<CallBase>(UserI)) { 1495 if (CB->isBundleOperand(&U)) 1496 return false; 1497 if (!CB->isArgOperand(&U)) 1498 return true; 1499 unsigned ArgNo = CB->getArgOperandNo(&U); 1500 1501 const auto &NoFreeArg = A.getAAFor<AANoFree>( 1502 *this, IRPosition::callsite_argument(*CB, ArgNo)); 1503 return NoFreeArg.isAssumedNoFree(); 1504 } 1505 1506 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 1507 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 1508 Follow = true; 1509 return true; 1510 } 1511 if (isa<ReturnInst>(UserI)) 1512 return true; 1513 1514 // Unknown user. 1515 return false; 1516 }; 1517 if (!A.checkForAllUses(Pred, *this, AssociatedValue)) 1518 return indicatePessimisticFixpoint(); 1519 1520 return ChangeStatus::UNCHANGED; 1521 } 1522 }; 1523 1524 /// NoFree attribute for a call site argument. 1525 struct AANoFreeArgument final : AANoFreeFloating { 1526 AANoFreeArgument(const IRPosition &IRP, Attributor &A) 1527 : AANoFreeFloating(IRP, A) {} 1528 1529 /// See AbstractAttribute::trackStatistics() 1530 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) } 1531 }; 1532 1533 /// NoFree attribute for call site arguments. 1534 struct AANoFreeCallSiteArgument final : AANoFreeFloating { 1535 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) 1536 : AANoFreeFloating(IRP, A) {} 1537 1538 /// See AbstractAttribute::updateImpl(...). 1539 ChangeStatus updateImpl(Attributor &A) override { 1540 // TODO: Once we have call site specific value information we can provide 1541 // call site specific liveness information and then it makes 1542 // sense to specialize attributes for call sites arguments instead of 1543 // redirecting requests to the callee argument. 1544 Argument *Arg = getAssociatedArgument(); 1545 if (!Arg) 1546 return indicatePessimisticFixpoint(); 1547 const IRPosition &ArgPos = IRPosition::argument(*Arg); 1548 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos); 1549 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 1550 } 1551 1552 /// See AbstractAttribute::trackStatistics() 1553 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}; 1554 }; 1555 1556 /// NoFree attribute for function return value. 1557 struct AANoFreeReturned final : AANoFreeFloating { 1558 AANoFreeReturned(const IRPosition &IRP, Attributor &A) 1559 : AANoFreeFloating(IRP, A) { 1560 llvm_unreachable("NoFree is not applicable to function returns!"); 1561 } 1562 1563 /// See AbstractAttribute::initialize(...). 1564 void initialize(Attributor &A) override { 1565 llvm_unreachable("NoFree is not applicable to function returns!"); 1566 } 1567 1568 /// See AbstractAttribute::updateImpl(...). 1569 ChangeStatus updateImpl(Attributor &A) override { 1570 llvm_unreachable("NoFree is not applicable to function returns!"); 1571 } 1572 1573 /// See AbstractAttribute::trackStatistics() 1574 void trackStatistics() const override {} 1575 }; 1576 1577 /// NoFree attribute deduction for a call site return value. 
1578 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 1579 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 1580 : AANoFreeFloating(IRP, A) {} 1581 1582 ChangeStatus manifest(Attributor &A) override { 1583 return ChangeStatus::UNCHANGED; 1584 } 1585 /// See AbstractAttribute::trackStatistics() 1586 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 1587 }; 1588 1589 /// ------------------------ NonNull Argument Attribute ------------------------ 1590 static int64_t getKnownNonNullAndDerefBytesForUse( 1591 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 1592 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 1593 TrackUse = false; 1594 1595 const Value *UseV = U->get(); 1596 if (!UseV->getType()->isPointerTy()) 1597 return 0; 1598 1599 Type *PtrTy = UseV->getType(); 1600 const Function *F = I->getFunction(); 1601 bool NullPointerIsDefined = 1602 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 1603 const DataLayout &DL = A.getInfoCache().getDL(); 1604 if (const auto *CB = dyn_cast<CallBase>(I)) { 1605 if (CB->isBundleOperand(U)) { 1606 if (RetainedKnowledge RK = getKnowledgeFromUse( 1607 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 1608 IsNonNull |= 1609 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 1610 return RK.ArgValue; 1611 } 1612 return 0; 1613 } 1614 1615 if (CB->isCallee(U)) { 1616 IsNonNull |= !NullPointerIsDefined; 1617 return 0; 1618 } 1619 1620 unsigned ArgNo = CB->getArgOperandNo(U); 1621 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 1622 // As long as we only use known information there is no need to track 1623 // dependences here. 1624 auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP, 1625 /* TrackDependence */ false); 1626 IsNonNull |= DerefAA.isKnownNonNull(); 1627 return DerefAA.getKnownDereferenceableBytes(); 1628 } 1629 1630 // We need to follow common pointer manipulation uses to the accesses they 1631 // feed into. We can try to be smart to avoid looking through things we do not 1632 // like for now, e.g., non-inbounds GEPs. 1633 if (isa<CastInst>(I)) { 1634 TrackUse = true; 1635 return 0; 1636 } 1637 1638 if (isa<GetElementPtrInst>(I)) { 1639 TrackUse = true; 1640 return 0; 1641 } 1642 1643 int64_t Offset; 1644 const Value *Base = 1645 getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL); 1646 if (Base) { 1647 if (Base == &AssociatedValue && 1648 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1649 int64_t DerefBytes = 1650 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset; 1651 1652 IsNonNull |= !NullPointerIsDefined; 1653 return std::max(int64_t(0), DerefBytes); 1654 } 1655 } 1656 1657 /// Corner case when an offset is 0. 
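/// Example (illustrative): for
///   %g = getelementptr i32, i32* %p, i64 0    ; note: not inbounds
///   store i32 1, i32* %g
/// the minimal-base lookup above may not apply, but with a zero offset the
/// access still implies that %p itself is dereferenceable for the store's
/// size (4 bytes here) and, if null is not a defined pointer in this address
/// space, nonnull as well.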
1658 Base = getBasePointerOfAccessPointerOperand(I, Offset, DL, 1659 /*AllowNonInbounds*/ true); 1660 if (Base) { 1661 if (Offset == 0 && Base == &AssociatedValue && 1662 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 1663 int64_t DerefBytes = 1664 (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()); 1665 IsNonNull |= !NullPointerIsDefined; 1666 return std::max(int64_t(0), DerefBytes); 1667 } 1668 } 1669 1670 return 0; 1671 } 1672 1673 struct AANonNullImpl : AANonNull { 1674 AANonNullImpl(const IRPosition &IRP, Attributor &A) 1675 : AANonNull(IRP, A), 1676 NullIsDefined(NullPointerIsDefined( 1677 getAnchorScope(), 1678 getAssociatedValue().getType()->getPointerAddressSpace())) {} 1679 1680 /// See AbstractAttribute::initialize(...). 1681 void initialize(Attributor &A) override { 1682 Value &V = getAssociatedValue(); 1683 if (!NullIsDefined && 1684 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 1685 /* IgnoreSubsumingPositions */ false, &A)) { 1686 indicateOptimisticFixpoint(); 1687 return; 1688 } 1689 1690 if (isa<ConstantPointerNull>(V)) { 1691 indicatePessimisticFixpoint(); 1692 return; 1693 } 1694 1695 AANonNull::initialize(A); 1696 1697 bool CanBeNull = true; 1698 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) { 1699 if (!CanBeNull) { 1700 indicateOptimisticFixpoint(); 1701 return; 1702 } 1703 } 1704 1705 if (isa<GlobalValue>(&getAssociatedValue())) { 1706 indicatePessimisticFixpoint(); 1707 return; 1708 } 1709 1710 if (Instruction *CtxI = getCtxI()) 1711 followUsesInMBEC(*this, A, getState(), *CtxI); 1712 } 1713 1714 /// See followUsesInMBEC 1715 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 1716 AANonNull::StateType &State) { 1717 bool IsNonNull = false; 1718 bool TrackUse = false; 1719 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 1720 IsNonNull, TrackUse); 1721 State.setKnown(IsNonNull); 1722 return TrackUse; 1723 } 1724 1725 /// See AbstractAttribute::getAsStr(). 1726 const std::string getAsStr() const override { 1727 return getAssumed() ? "nonnull" : "may-null"; 1728 } 1729 1730 /// Flag to determine if the underlying value can be null and still allow 1731 /// valid accesses. 1732 const bool NullIsDefined; 1733 }; 1734 1735 /// NonNull attribute for a floating value. 1736 struct AANonNullFloating : public AANonNullImpl { 1737 AANonNullFloating(const IRPosition &IRP, Attributor &A) 1738 : AANonNullImpl(IRP, A) {} 1739 1740 /// See AbstractAttribute::updateImpl(...). 1741 ChangeStatus updateImpl(Attributor &A) override { 1742 const DataLayout &DL = A.getDataLayout(); 1743 1744 DominatorTree *DT = nullptr; 1745 AssumptionCache *AC = nullptr; 1746 InformationCache &InfoCache = A.getInfoCache(); 1747 if (const Function *Fn = getAnchorScope()) { 1748 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 1749 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 1750 } 1751 1752 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 1753 AANonNull::StateType &T, bool Stripped) -> bool { 1754 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V)); 1755 if (!Stripped && this == &AA) { 1756 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 1757 T.indicatePessimisticFixpoint(); 1758 } else { 1759 // Use abstract attribute information. 
1760 const AANonNull::StateType &NS = AA.getState(); 1761 T ^= NS; 1762 } 1763 return T.isValidState(); 1764 }; 1765 1766 StateType T; 1767 if (!genericValueTraversal<AANonNull, StateType>( 1768 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 1769 return indicatePessimisticFixpoint(); 1770 1771 return clampStateAndIndicateChange(getState(), T); 1772 } 1773 1774 /// See AbstractAttribute::trackStatistics() 1775 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1776 }; 1777 1778 /// NonNull attribute for function return value. 1779 struct AANonNullReturned final 1780 : AAReturnedFromReturnedValues<AANonNull, AANonNull> { 1781 AANonNullReturned(const IRPosition &IRP, Attributor &A) 1782 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} 1783 1784 /// See AbstractAttribute::getAsStr(). 1785 const std::string getAsStr() const override { 1786 return getAssumed() ? "nonnull" : "may-null"; 1787 } 1788 1789 /// See AbstractAttribute::trackStatistics() 1790 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 1791 }; 1792 1793 /// NonNull attribute for function argument. 1794 struct AANonNullArgument final 1795 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 1796 AANonNullArgument(const IRPosition &IRP, Attributor &A) 1797 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 1798 1799 /// See AbstractAttribute::trackStatistics() 1800 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 1801 }; 1802 1803 struct AANonNullCallSiteArgument final : AANonNullFloating { 1804 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 1805 : AANonNullFloating(IRP, A) {} 1806 1807 /// See AbstractAttribute::trackStatistics() 1808 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 1809 }; 1810 1811 /// NonNull attribute for a call site return position. 1812 struct AANonNullCallSiteReturned final 1813 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 1814 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 1815 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 1816 1817 /// See AbstractAttribute::trackStatistics() 1818 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 1819 }; 1820 1821 /// ------------------------ No-Recurse Attributes ---------------------------- 1822 1823 struct AANoRecurseImpl : public AANoRecurse { 1824 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 1825 1826 /// See AbstractAttribute::getAsStr() 1827 const std::string getAsStr() const override { 1828 return getAssumed() ? "norecurse" : "may-recurse"; 1829 } 1830 }; 1831 1832 struct AANoRecurseFunction final : AANoRecurseImpl { 1833 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 1834 : AANoRecurseImpl(IRP, A) {} 1835 1836 /// See AbstractAttribute::initialize(...). 1837 void initialize(Attributor &A) override { 1838 AANoRecurseImpl::initialize(A); 1839 if (const Function *F = getAnchorScope()) 1840 if (A.getInfoCache().getSccSize(*F) != 1) 1841 indicatePessimisticFixpoint(); 1842 } 1843 1844 /// See AbstractAttribute::updateImpl(...). 1845 ChangeStatus updateImpl(Attributor &A) override { 1846 1847 // If all live call sites are known to be no-recurse, we are as well. 
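// Illustrative example: if every known call site of the current function @f
// looks like
//   define void @caller() norecurse { ...; call void @f(); ... }
// i.e., lives in a caller already known to be norecurse, then @f cannot be
// reached as part of a recursive call chain through those sites.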
1848 auto CallSitePred = [&](AbstractCallSite ACS) { 1849 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 1850 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 1851 /* TrackDependence */ false, DepClassTy::OPTIONAL); 1852 return NoRecurseAA.isKnownNoRecurse(); 1853 }; 1854 bool AllCallSitesKnown; 1855 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { 1856 // If we know all call sites and all are known no-recurse, we are done. 1857 // If all known call sites, which might not be all that exist, are known 1858 // to be no-recurse, we are not done but we can continue to assume 1859 // no-recurse. If one of the call sites we have not visited will become 1860 // live, another update is triggered. 1861 if (AllCallSitesKnown) 1862 indicateOptimisticFixpoint(); 1863 return ChangeStatus::UNCHANGED; 1864 } 1865 1866 // If the above check does not hold anymore we look at the calls. 1867 auto CheckForNoRecurse = [&](Instruction &I) { 1868 const auto &CB = cast<CallBase>(I); 1869 if (CB.hasFnAttr(Attribute::NoRecurse)) 1870 return true; 1871 1872 const auto &NoRecurseAA = 1873 A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB)); 1874 if (!NoRecurseAA.isAssumedNoRecurse()) 1875 return false; 1876 1877 // Recursion to the same function 1878 if (CB.getCalledFunction() == getAnchorScope()) 1879 return false; 1880 1881 return true; 1882 }; 1883 1884 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this)) 1885 return indicatePessimisticFixpoint(); 1886 return ChangeStatus::UNCHANGED; 1887 } 1888 1889 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 1890 }; 1891 1892 /// NoRecurse attribute deduction for a call sites. 1893 struct AANoRecurseCallSite final : AANoRecurseImpl { 1894 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 1895 : AANoRecurseImpl(IRP, A) {} 1896 1897 /// See AbstractAttribute::initialize(...). 1898 void initialize(Attributor &A) override { 1899 AANoRecurseImpl::initialize(A); 1900 Function *F = getAssociatedFunction(); 1901 if (!F) 1902 indicatePessimisticFixpoint(); 1903 } 1904 1905 /// See AbstractAttribute::updateImpl(...). 1906 ChangeStatus updateImpl(Attributor &A) override { 1907 // TODO: Once we have call site specific value information we can provide 1908 // call site specific liveness information and then it makes 1909 // sense to specialize attributes for call sites arguments instead of 1910 // redirecting requests to the callee argument. 1911 Function *F = getAssociatedFunction(); 1912 const IRPosition &FnPos = IRPosition::function(*F); 1913 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos); 1914 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1915 } 1916 1917 /// See AbstractAttribute::trackStatistics() 1918 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); } 1919 }; 1920 1921 /// -------------------- Undefined-Behavior Attributes ------------------------ 1922 1923 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { 1924 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A) 1925 : AAUndefinedBehavior(IRP, A) {} 1926 1927 /// See AbstractAttribute::updateImpl(...). 1928 // through a pointer (i.e. also branches etc.) 1929 ChangeStatus updateImpl(Attributor &A) override { 1930 const size_t UBPrevSize = KnownUBInsts.size(); 1931 const size_t NoUBPrevSize = AssumedNoUBInsts.size(); 1932 1933 auto InspectMemAccessInstForUB = [&](Instruction &I) { 1934 // Skip instructions that are already saved. 
1935 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1936 return true; 1937 1938 // If we reach here, we know we have an instruction 1939 // that accesses memory through a pointer operand, 1940 // for which getPointerOperand() should give it to us. 1941 const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true); 1942 assert(PtrOp && 1943 "Expected pointer operand of memory accessing instruction"); 1944 1945 // Either we stopped and the appropriate action was taken, 1946 // or we got back a simplified value to continue. 1947 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); 1948 if (!SimplifiedPtrOp.hasValue()) 1949 return true; 1950 const Value *PtrOpVal = SimplifiedPtrOp.getValue(); 1951 1952 // A memory access through a pointer is considered UB 1953 // only if the pointer has constant null value. 1954 // TODO: Expand it to not only check constant values. 1955 if (!isa<ConstantPointerNull>(PtrOpVal)) { 1956 AssumedNoUBInsts.insert(&I); 1957 return true; 1958 } 1959 const Type *PtrTy = PtrOpVal->getType(); 1960 1961 // Because we only consider instructions inside functions, 1962 // assume that a parent function exists. 1963 const Function *F = I.getFunction(); 1964 1965 // A memory access using constant null pointer is only considered UB 1966 // if null pointer is _not_ defined for the target platform. 1967 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) 1968 AssumedNoUBInsts.insert(&I); 1969 else 1970 KnownUBInsts.insert(&I); 1971 return true; 1972 }; 1973 1974 auto InspectBrInstForUB = [&](Instruction &I) { 1975 // A conditional branch instruction is considered UB if it has `undef` 1976 // condition. 1977 1978 // Skip instructions that are already saved. 1979 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 1980 return true; 1981 1982 // We know we have a branch instruction. 1983 auto BrInst = cast<BranchInst>(&I); 1984 1985 // Unconditional branches are never considered UB. 1986 if (BrInst->isUnconditional()) 1987 return true; 1988 1989 // Either we stopped and the appropriate action was taken, 1990 // or we got back a simplified value to continue. 1991 Optional<Value *> SimplifiedCond = 1992 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); 1993 if (!SimplifiedCond.hasValue()) 1994 return true; 1995 AssumedNoUBInsts.insert(&I); 1996 return true; 1997 }; 1998 1999 auto InspectCallSiteForUB = [&](Instruction &I) { 2000 // Check whether a callsite always cause UB or not 2001 2002 // Skip instructions that are already saved. 2003 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 2004 return true; 2005 2006 // Check nonnull and noundef argument attribute violation for each 2007 // callsite. 2008 CallBase &CB = cast<CallBase>(I); 2009 Function *Callee = CB.getCalledFunction(); 2010 if (!Callee) 2011 return true; 2012 for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) { 2013 // If current argument is known to be simplified to null pointer and the 2014 // corresponding argument position is known to have nonnull attribute, 2015 // the argument is poison. Furthermore, if the argument is poison and 2016 // the position is known to have noundef attriubte, this callsite is 2017 // considered UB. 2018 if (idx >= Callee->arg_size()) 2019 break; 2020 Value *ArgVal = CB.getArgOperand(idx); 2021 if (!ArgVal) 2022 continue; 2023 // Here, we handle three cases. 2024 // (1) Not having a value means it is dead. (we can replace the value 2025 // with undef) 2026 // (2) Simplified to undef. 
The argument violates the noundef attribute.
2027 // (3) Simplified to null pointer where known to be nonnull.
2028 // The argument is a poison value and violates the noundef attribute.
2029 IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2030 auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP,
2031 /* TrackDependence */ false);
2032 if (!NoUndefAA.isKnownNoUndef())
2033 continue;
2034 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2035 *this, IRPosition::value(*ArgVal), /* TrackDependence */ false);
2036 if (!ValueSimplifyAA.isKnown())
2037 continue;
2038 Optional<Value *> SimplifiedVal =
2039 ValueSimplifyAA.getAssumedSimplifiedValue(A);
2040 if (!SimplifiedVal.hasValue() ||
2041 isa<UndefValue>(*SimplifiedVal.getValue())) {
2042 KnownUBInsts.insert(&I);
2043 continue;
2044 }
2045 if (!ArgVal->getType()->isPointerTy() ||
2046 !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2047 continue;
2048 auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP,
2049 /* TrackDependence */ false);
2050 if (NonNullAA.isKnownNonNull())
2051 KnownUBInsts.insert(&I);
2052 }
2053 return true;
2054 };
2055
2056 auto InspectReturnInstForUB =
2057 [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2058 // Check whether a return instruction always causes UB.
2059 // Note: It is guaranteed that the returned position of the anchor
2060 // scope has noundef attribute when this is called.
2061
2062 // When the returned position has noundef attribute, UB occurs in the
2063 // following cases.
2064 // (1) Returned value is known to be undef.
2065 // (2) The value is known to be a null pointer and the returned
2066 // position has nonnull attribute (because the returned value is
2067 // poison).
2068 // Note: This callback is not called for a dead returned value because
2069 // such values are ignored in
2070 // checkForAllReturnedValuesAndReturnInsts.
2071 bool FoundUB = false;
2072 if (isa<UndefValue>(V)) {
2073 FoundUB = true;
2074 } else {
2075 if (isa<ConstantPointerNull>(V)) {
2076 auto &NonNullAA = A.getAAFor<AANonNull>(
2077 *this, IRPosition::returned(*getAnchorScope()),
2078 /* TrackDependence */ false);
2079 if (NonNullAA.isKnownNonNull())
2080 FoundUB = true;
2081 }
2082 }
2083
2084 if (FoundUB)
2085 for (ReturnInst *RI : RetInsts)
2086 KnownUBInsts.insert(RI);
2087 return true;
2088 };
2089
2090 A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2091 {Instruction::Load, Instruction::Store,
2092 Instruction::AtomicCmpXchg,
2093 Instruction::AtomicRMW},
2094 /* CheckBBLivenessOnly */ true);
2095 A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2096 /* CheckBBLivenessOnly */ true);
2097 A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2098
2099 // If the returned position of the anchor scope has noundef attribute, check
2100 // all returned instructions.
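// Example (illustrative): in
//   define noundef nonnull i8* @f() { ...; ret i8* null }
// the returned null is poison due to nonnull and thus violates noundef, so
// the `ret` is recorded as known UB; `ret i8* undef` is handled the same way.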
2101 if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2102 auto &RetPosNoUndefAA =
2103 A.getAAFor<AANoUndef>(*this, IRPosition::returned(*getAnchorScope()),
2104 /* TrackDependence */ false);
2105 if (RetPosNoUndefAA.isKnownNoUndef())
2106 A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2107 *this);
2108 }
2109
2110 if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2111 UBPrevSize != KnownUBInsts.size())
2112 return ChangeStatus::CHANGED;
2113 return ChangeStatus::UNCHANGED;
2114 }
2115
2116 bool isKnownToCauseUB(Instruction *I) const override {
2117 return KnownUBInsts.count(I);
2118 }
2119
2120 bool isAssumedToCauseUB(Instruction *I) const override {
2121 // In simple words, if an instruction is not in the set of instructions
2122 // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
2123 // those in the KnownUBInsts set). The rest is boilerplate
2124 // to ensure that it is one of the instructions we test
2125 // for UB.
2126
2127 switch (I->getOpcode()) {
2128 case Instruction::Load:
2129 case Instruction::Store:
2130 case Instruction::AtomicCmpXchg:
2131 case Instruction::AtomicRMW:
2132 return !AssumedNoUBInsts.count(I);
2133 case Instruction::Br: {
2134 auto BrInst = cast<BranchInst>(I);
2135 if (BrInst->isUnconditional())
2136 return false;
2137 return !AssumedNoUBInsts.count(I);
2138 } break;
2139 default:
2140 return false;
2141 }
2142 return false;
2143 }
2144
2145 ChangeStatus manifest(Attributor &A) override {
2146 if (KnownUBInsts.empty())
2147 return ChangeStatus::UNCHANGED;
2148 for (Instruction *I : KnownUBInsts)
2149 A.changeToUnreachableAfterManifest(I);
2150 return ChangeStatus::CHANGED;
2151 }
2152
2153 /// See AbstractAttribute::getAsStr()
2154 const std::string getAsStr() const override {
2155 return getAssumed() ? "undefined-behavior" : "no-ub";
2156 }
2157
2158 /// Note: The correctness of this analysis depends on the fact that the
2159 /// following 2 sets will stop changing after some point.
2160 /// "Change" here means that their size changes.
2161 /// The size of each set is monotonically increasing
2162 /// (we only add items to them) and it is upper bounded by the number of
2163 /// instructions in the processed function (we can never save more
2164 /// elements in either set than this number). Hence, at some point,
2165 /// they will stop increasing.
2166 /// Consequently, at some point, both sets will have stopped
2167 /// changing, effectively making the analysis reach a fixpoint.
2168
2169 /// Note: These 2 sets are disjoint and an instruction can be considered
2170 /// one of 3 things:
2171 /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2172 /// the KnownUBInsts set.
2173 /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2174 /// has a reason to assume it).
2175 /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2176 /// could not find a reason to assume or prove that it can cause UB,
2177 /// hence it assumes it doesn't. We have a set for these instructions
2178 /// so that we don't reprocess them in every update.
2179 /// Note however that instructions in this set may cause UB.
2180
2181 protected:
2182 /// A set of all live instructions _known_ to cause UB.
2183 SmallPtrSet<Instruction *, 8> KnownUBInsts;
2184
2185 private:
2186 /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2187 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts; 2188 2189 // Should be called on updates in which if we're processing an instruction 2190 // \p I that depends on a value \p V, one of the following has to happen: 2191 // - If the value is assumed, then stop. 2192 // - If the value is known but undef, then consider it UB. 2193 // - Otherwise, do specific processing with the simplified value. 2194 // We return None in the first 2 cases to signify that an appropriate 2195 // action was taken and the caller should stop. 2196 // Otherwise, we return the simplified value that the caller should 2197 // use for specific processing. 2198 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V, 2199 Instruction *I) { 2200 const auto &ValueSimplifyAA = 2201 A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V)); 2202 Optional<Value *> SimplifiedV = 2203 ValueSimplifyAA.getAssumedSimplifiedValue(A); 2204 if (!ValueSimplifyAA.isKnown()) { 2205 // Don't depend on assumed values. 2206 return llvm::None; 2207 } 2208 if (!SimplifiedV.hasValue()) { 2209 // If it is known (which we tested above) but it doesn't have a value, 2210 // then we can assume `undef` and hence the instruction is UB. 2211 KnownUBInsts.insert(I); 2212 return llvm::None; 2213 } 2214 Value *Val = SimplifiedV.getValue(); 2215 if (isa<UndefValue>(Val)) { 2216 KnownUBInsts.insert(I); 2217 return llvm::None; 2218 } 2219 return Val; 2220 } 2221 }; 2222 2223 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { 2224 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) 2225 : AAUndefinedBehaviorImpl(IRP, A) {} 2226 2227 /// See AbstractAttribute::trackStatistics() 2228 void trackStatistics() const override { 2229 STATS_DECL(UndefinedBehaviorInstruction, Instruction, 2230 "Number of instructions known to have UB"); 2231 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += 2232 KnownUBInsts.size(); 2233 } 2234 }; 2235 2236 /// ------------------------ Will-Return Attributes ---------------------------- 2237 2238 // Helper function that checks whether a function has any cycle which we don't 2239 // know if it is bounded or not. 2240 // Loops with maximum trip count are considered bounded, any other cycle not. 2241 static bool mayContainUnboundedCycle(Function &F, Attributor &A) { 2242 ScalarEvolution *SE = 2243 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); 2244 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); 2245 // If either SCEV or LoopInfo is not available for the function then we assume 2246 // any cycle to be unbounded cycle. 2247 // We use scc_iterator which uses Tarjan algorithm to find all the maximal 2248 // SCCs.To detect if there's a cycle, we only need to find the maximal ones. 2249 if (!SE || !LI) { 2250 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) 2251 if (SCCI.hasCycle()) 2252 return true; 2253 return false; 2254 } 2255 2256 // If there's irreducible control, the function may contain non-loop cycles. 2257 if (mayContainIrreducibleControl(F, LI)) 2258 return true; 2259 2260 // Any loop that does not have a max trip count is considered unbounded cycle. 
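// For example (illustrative), SCEV can usually compute a constant maximum
// trip count for
//   for (int i = 0; i != 128; ++i) { ... }
// so such a loop is treated as bounded, whereas `while (*p) { ... }`
// typically has no such bound and makes this function return true.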
2261 for (auto *L : LI->getLoopsInPreorder()) { 2262 if (!SE->getSmallConstantMaxTripCount(L)) 2263 return true; 2264 } 2265 return false; 2266 } 2267 2268 struct AAWillReturnImpl : public AAWillReturn { 2269 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2270 : AAWillReturn(IRP, A) {} 2271 2272 /// See AbstractAttribute::initialize(...). 2273 void initialize(Attributor &A) override { 2274 AAWillReturn::initialize(A); 2275 2276 Function *F = getAnchorScope(); 2277 if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A)) 2278 indicatePessimisticFixpoint(); 2279 } 2280 2281 /// See AbstractAttribute::updateImpl(...). 2282 ChangeStatus updateImpl(Attributor &A) override { 2283 auto CheckForWillReturn = [&](Instruction &I) { 2284 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2285 const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos); 2286 if (WillReturnAA.isKnownWillReturn()) 2287 return true; 2288 if (!WillReturnAA.isAssumedWillReturn()) 2289 return false; 2290 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos); 2291 return NoRecurseAA.isAssumedNoRecurse(); 2292 }; 2293 2294 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this)) 2295 return indicatePessimisticFixpoint(); 2296 2297 return ChangeStatus::UNCHANGED; 2298 } 2299 2300 /// See AbstractAttribute::getAsStr() 2301 const std::string getAsStr() const override { 2302 return getAssumed() ? "willreturn" : "may-noreturn"; 2303 } 2304 }; 2305 2306 struct AAWillReturnFunction final : AAWillReturnImpl { 2307 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2308 : AAWillReturnImpl(IRP, A) {} 2309 2310 /// See AbstractAttribute::trackStatistics() 2311 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2312 }; 2313 2314 /// WillReturn attribute deduction for a call sites. 2315 struct AAWillReturnCallSite final : AAWillReturnImpl { 2316 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2317 : AAWillReturnImpl(IRP, A) {} 2318 2319 /// See AbstractAttribute::initialize(...). 2320 void initialize(Attributor &A) override { 2321 AAWillReturnImpl::initialize(A); 2322 Function *F = getAssociatedFunction(); 2323 if (!F) 2324 indicatePessimisticFixpoint(); 2325 } 2326 2327 /// See AbstractAttribute::updateImpl(...). 2328 ChangeStatus updateImpl(Attributor &A) override { 2329 // TODO: Once we have call site specific value information we can provide 2330 // call site specific liveness information and then it makes 2331 // sense to specialize attributes for call sites arguments instead of 2332 // redirecting requests to the callee argument. 2333 Function *F = getAssociatedFunction(); 2334 const IRPosition &FnPos = IRPosition::function(*F); 2335 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos); 2336 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2337 } 2338 2339 /// See AbstractAttribute::trackStatistics() 2340 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2341 }; 2342 2343 /// -------------------AAReachability Attribute-------------------------- 2344 2345 struct AAReachabilityImpl : AAReachability { 2346 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2347 : AAReachability(IRP, A) {} 2348 2349 const std::string getAsStr() const override { 2350 // TODO: Return the number of reachable queries. 2351 return "reachable"; 2352 } 2353 2354 /// See AbstractAttribute::initialize(...). 
2355 void initialize(Attributor &A) override { indicatePessimisticFixpoint(); } 2356 2357 /// See AbstractAttribute::updateImpl(...). 2358 ChangeStatus updateImpl(Attributor &A) override { 2359 return indicatePessimisticFixpoint(); 2360 } 2361 }; 2362 2363 struct AAReachabilityFunction final : public AAReachabilityImpl { 2364 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2365 : AAReachabilityImpl(IRP, A) {} 2366 2367 /// See AbstractAttribute::trackStatistics() 2368 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2369 }; 2370 2371 /// ------------------------ NoAlias Argument Attribute ------------------------ 2372 2373 struct AANoAliasImpl : AANoAlias { 2374 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2375 assert(getAssociatedType()->isPointerTy() && 2376 "Noalias is a pointer attribute"); 2377 } 2378 2379 const std::string getAsStr() const override { 2380 return getAssumed() ? "noalias" : "may-alias"; 2381 } 2382 }; 2383 2384 /// NoAlias attribute for a floating value. 2385 struct AANoAliasFloating final : AANoAliasImpl { 2386 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 2387 : AANoAliasImpl(IRP, A) {} 2388 2389 /// See AbstractAttribute::initialize(...). 2390 void initialize(Attributor &A) override { 2391 AANoAliasImpl::initialize(A); 2392 Value *Val = &getAssociatedValue(); 2393 do { 2394 CastInst *CI = dyn_cast<CastInst>(Val); 2395 if (!CI) 2396 break; 2397 Value *Base = CI->getOperand(0); 2398 if (!Base->hasOneUse()) 2399 break; 2400 Val = Base; 2401 } while (true); 2402 2403 if (!Val->getType()->isPointerTy()) { 2404 indicatePessimisticFixpoint(); 2405 return; 2406 } 2407 2408 if (isa<AllocaInst>(Val)) 2409 indicateOptimisticFixpoint(); 2410 else if (isa<ConstantPointerNull>(Val) && 2411 !NullPointerIsDefined(getAnchorScope(), 2412 Val->getType()->getPointerAddressSpace())) 2413 indicateOptimisticFixpoint(); 2414 else if (Val != &getAssociatedValue()) { 2415 const auto &ValNoAliasAA = 2416 A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val)); 2417 if (ValNoAliasAA.isKnownNoAlias()) 2418 indicateOptimisticFixpoint(); 2419 } 2420 } 2421 2422 /// See AbstractAttribute::updateImpl(...). 2423 ChangeStatus updateImpl(Attributor &A) override { 2424 // TODO: Implement this. 2425 return indicatePessimisticFixpoint(); 2426 } 2427 2428 /// See AbstractAttribute::trackStatistics() 2429 void trackStatistics() const override { 2430 STATS_DECLTRACK_FLOATING_ATTR(noalias) 2431 } 2432 }; 2433 2434 /// NoAlias attribute for an argument. 2435 struct AANoAliasArgument final 2436 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 2437 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 2438 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 2439 2440 /// See AbstractAttribute::initialize(...). 2441 void initialize(Attributor &A) override { 2442 Base::initialize(A); 2443 // See callsite argument attribute and callee argument attribute. 2444 if (hasAttr({Attribute::ByVal})) 2445 indicateOptimisticFixpoint(); 2446 } 2447 2448 /// See AbstractAttribute::update(...). 2449 ChangeStatus updateImpl(Attributor &A) override { 2450 // We have to make sure no-alias on the argument does not break 2451 // synchronization when this is a callback argument, see also [1] below. 2452 // If synchronization cannot be affected, we delegate to the base updateImpl 2453 // function, otherwise we give up for now. 
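// The three sufficient conditions checked below are: the enclosing function
// is assumed nosync, the argument is assumed read-only, or the argument is
// never passed through a callback call site.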
2454 2455 // If the function is no-sync, no-alias cannot break synchronization. 2456 const auto &NoSyncAA = A.getAAFor<AANoSync>( 2457 *this, IRPosition::function_scope(getIRPosition())); 2458 if (NoSyncAA.isAssumedNoSync()) 2459 return Base::updateImpl(A); 2460 2461 // If the argument is read-only, no-alias cannot break synchronization. 2462 const auto &MemBehaviorAA = 2463 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition()); 2464 if (MemBehaviorAA.isAssumedReadOnly()) 2465 return Base::updateImpl(A); 2466 2467 // If the argument is never passed through callbacks, no-alias cannot break 2468 // synchronization. 2469 bool AllCallSitesKnown; 2470 if (A.checkForAllCallSites( 2471 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 2472 true, AllCallSitesKnown)) 2473 return Base::updateImpl(A); 2474 2475 // TODO: add no-alias but make sure it doesn't break synchronization by 2476 // introducing fake uses. See: 2477 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel, 2478 // International Workshop on OpenMP 2018, 2479 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 2480 2481 return indicatePessimisticFixpoint(); 2482 } 2483 2484 /// See AbstractAttribute::trackStatistics() 2485 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 2486 }; 2487 2488 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 2489 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 2490 : AANoAliasImpl(IRP, A) {} 2491 2492 /// See AbstractAttribute::initialize(...). 2493 void initialize(Attributor &A) override { 2494 // See callsite argument attribute and callee argument attribute. 2495 const auto &CB = cast<CallBase>(getAnchorValue()); 2496 if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias)) 2497 indicateOptimisticFixpoint(); 2498 Value &Val = getAssociatedValue(); 2499 if (isa<ConstantPointerNull>(Val) && 2500 !NullPointerIsDefined(getAnchorScope(), 2501 Val.getType()->getPointerAddressSpace())) 2502 indicateOptimisticFixpoint(); 2503 } 2504 2505 /// Determine if the underlying value may alias with the call site argument 2506 /// \p OtherArgNo of \p ICS (= the underlying call site). 2507 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 2508 const AAMemoryBehavior &MemBehaviorAA, 2509 const CallBase &CB, unsigned OtherArgNo) { 2510 // We do not need to worry about aliasing with the underlying IRP. 2511 if (this->getArgNo() == (int)OtherArgNo) 2512 return false; 2513 2514 // If it is not a pointer or pointer vector we do not alias. 2515 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 2516 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 2517 return false; 2518 2519 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 2520 *this, IRPosition::callsite_argument(CB, OtherArgNo), 2521 /* TrackDependence */ false); 2522 2523 // If the argument is readnone, there is no read-write aliasing. 2524 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 2525 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 2526 return false; 2527 } 2528 2529 // If the argument is readonly and the underlying value is readonly, there 2530 // is no read-write aliasing. 
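// (E.g., two pointer arguments that are both only read may overlap without
// creating a conflicting read-write access, so they do not block noalias
// here.)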
2531 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2532 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2533 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2534 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2535 return false;
2536 }
2537
2538 // We have to utilize actual alias analysis queries so we need the object.
2539 if (!AAR)
2540 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2541
2542 // Try to rule it out at the call site.
2543 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2544 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2545 "callsite arguments: "
2546 << getAssociatedValue() << " " << *ArgOp << " => "
2547 << (IsAliasing ? "" : "no-") << "alias \n");
2548
2549 return IsAliasing;
2550 }
2551
2552 bool
2553 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2554 const AAMemoryBehavior &MemBehaviorAA,
2555 const AANoAlias &NoAliasAA) {
2556 // We can deduce "noalias" if the following conditions hold.
2557 // (i) Associated value is assumed to be noalias in the definition.
2558 // (ii) Associated value is assumed to be no-capture in all the uses
2559 // possibly executed before this callsite.
2560 // (iii) There is no other pointer argument which could alias with the
2561 // value.
2562
2563 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2564 if (!AssociatedValueIsNoAliasAtDef) {
2565 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2566 << " is not no-alias at the definition\n");
2567 return false;
2568 }
2569
2570 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2571
2572 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2573 auto &NoCaptureAA =
2574 A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2575 // Check whether the value is captured in the scope using AANoCapture.
2576 // Look at CFG and check only uses possibly executed before this
2577 // callsite.
2578 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2579 Instruction *UserI = cast<Instruction>(U.getUser());
2580
2581 // If the user is the current context instruction and it has only one use.
2582 if (UserI == getCtxI() && UserI->hasOneUse()) 2583 return true; 2584 2585 const Function *ScopeFn = VIRP.getAnchorScope(); 2586 if (ScopeFn) { 2587 const auto &ReachabilityAA = 2588 A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn)); 2589 2590 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 2591 return true; 2592 2593 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2594 if (CB->isArgOperand(&U)) { 2595 2596 unsigned ArgNo = CB->getArgOperandNo(&U); 2597 2598 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 2599 *this, IRPosition::callsite_argument(*CB, ArgNo)); 2600 2601 if (NoCaptureAA.isAssumedNoCapture()) 2602 return true; 2603 } 2604 } 2605 } 2606 2607 // For cases which can potentially have more users 2608 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 2609 isa<SelectInst>(U)) { 2610 Follow = true; 2611 return true; 2612 } 2613 2614 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 2615 return false; 2616 }; 2617 2618 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 2619 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 2620 LLVM_DEBUG( 2621 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 2622 << " cannot be noalias as it is potentially captured\n"); 2623 return false; 2624 } 2625 } 2626 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 2627 2628 // Check there is no other pointer argument which could alias with the 2629 // value passed at this call site. 2630 // TODO: AbstractCallSite 2631 const auto &CB = cast<CallBase>(getAnchorValue()); 2632 for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands(); 2633 OtherArgNo++) 2634 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 2635 return false; 2636 2637 return true; 2638 } 2639 2640 /// See AbstractAttribute::updateImpl(...). 2641 ChangeStatus updateImpl(Attributor &A) override { 2642 // If the argument is readnone we are done as there are no accesses via the 2643 // argument. 2644 auto &MemBehaviorAA = 2645 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 2646 /* TrackDependence */ false); 2647 if (MemBehaviorAA.isAssumedReadNone()) { 2648 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2649 return ChangeStatus::UNCHANGED; 2650 } 2651 2652 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 2653 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP, 2654 /* TrackDependence */ false); 2655 2656 AAResults *AAR = nullptr; 2657 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 2658 NoAliasAA)) { 2659 LLVM_DEBUG( 2660 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 2661 return ChangeStatus::UNCHANGED; 2662 } 2663 2664 return indicatePessimisticFixpoint(); 2665 } 2666 2667 /// See AbstractAttribute::trackStatistics() 2668 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 2669 }; 2670 2671 /// NoAlias attribute for function return value. 2672 struct AANoAliasReturned final : AANoAliasImpl { 2673 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 2674 : AANoAliasImpl(IRP, A) {} 2675 2676 /// See AbstractAttribute::updateImpl(...). 2677 virtual ChangeStatus updateImpl(Attributor &A) override { 2678 2679 auto CheckReturnValue = [&](Value &RV) -> bool { 2680 if (Constant *C = dyn_cast<Constant>(&RV)) 2681 if (C->isNullValue() || isa<UndefValue>(C)) 2682 return true; 2683 2684 /// For now, we can only deduce noalias if we have call sites. 2685 /// FIXME: add more support. 
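/// Illustrative example: for
///   %mem = call noalias i8* @malloc(i64 %n)
///   ret i8* %mem
/// the returned call is assumed noalias and, provided %mem is assumed
/// nocapture (maybe returned), the function's return position can be marked
/// noalias as well.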
2686 if (!isa<CallBase>(&RV)) 2687 return false; 2688 2689 const IRPosition &RVPos = IRPosition::value(RV); 2690 const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos); 2691 if (!NoAliasAA.isAssumedNoAlias()) 2692 return false; 2693 2694 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos); 2695 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 2696 }; 2697 2698 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 2699 return indicatePessimisticFixpoint(); 2700 2701 return ChangeStatus::UNCHANGED; 2702 } 2703 2704 /// See AbstractAttribute::trackStatistics() 2705 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 2706 }; 2707 2708 /// NoAlias attribute deduction for a call site return value. 2709 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 2710 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 2711 : AANoAliasImpl(IRP, A) {} 2712 2713 /// See AbstractAttribute::initialize(...). 2714 void initialize(Attributor &A) override { 2715 AANoAliasImpl::initialize(A); 2716 Function *F = getAssociatedFunction(); 2717 if (!F) 2718 indicatePessimisticFixpoint(); 2719 } 2720 2721 /// See AbstractAttribute::updateImpl(...). 2722 ChangeStatus updateImpl(Attributor &A) override { 2723 // TODO: Once we have call site specific value information we can provide 2724 // call site specific liveness information and then it makes 2725 // sense to specialize attributes for call sites arguments instead of 2726 // redirecting requests to the callee argument. 2727 Function *F = getAssociatedFunction(); 2728 const IRPosition &FnPos = IRPosition::returned(*F); 2729 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos); 2730 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2731 } 2732 2733 /// See AbstractAttribute::trackStatistics() 2734 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 2735 }; 2736 2737 /// -------------------AAIsDead Function Attribute----------------------- 2738 2739 struct AAIsDeadValueImpl : public AAIsDead { 2740 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 2741 2742 /// See AAIsDead::isAssumedDead(). 2743 bool isAssumedDead() const override { return getAssumed(); } 2744 2745 /// See AAIsDead::isKnownDead(). 2746 bool isKnownDead() const override { return getKnown(); } 2747 2748 /// See AAIsDead::isAssumedDead(BasicBlock *). 2749 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 2750 2751 /// See AAIsDead::isKnownDead(BasicBlock *). 2752 bool isKnownDead(const BasicBlock *BB) const override { return false; } 2753 2754 /// See AAIsDead::isAssumedDead(Instruction *I). 2755 bool isAssumedDead(const Instruction *I) const override { 2756 return I == getCtxI() && isAssumedDead(); 2757 } 2758 2759 /// See AAIsDead::isKnownDead(Instruction *I). 2760 bool isKnownDead(const Instruction *I) const override { 2761 return isAssumedDead(I) && getKnown(); 2762 } 2763 2764 /// See AbstractAttribute::getAsStr(). 2765 const std::string getAsStr() const override { 2766 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 2767 } 2768 2769 /// Check if all uses are assumed dead. 2770 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 2771 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 2772 // Explicitly set the dependence class to required because we want a long 2773 // chain of N dependent instructions to be considered live as soon as one is 2774 // without going through N update cycles. 
This is not required for 2775 // correctness. 2776 return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED); 2777 } 2778 2779 /// Determine if \p I is assumed to be side-effect free. 2780 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 2781 if (!I || wouldInstructionBeTriviallyDead(I)) 2782 return true; 2783 2784 auto *CB = dyn_cast<CallBase>(I); 2785 if (!CB || isa<IntrinsicInst>(CB)) 2786 return false; 2787 2788 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 2789 const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>( 2790 *this, CallIRP, /* TrackDependence */ false); 2791 if (!NoUnwindAA.isAssumedNoUnwind()) 2792 return false; 2793 if (!NoUnwindAA.isKnownNoUnwind()) 2794 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 2795 2796 const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>( 2797 *this, CallIRP, /* TrackDependence */ false); 2798 if (MemBehaviorAA.isAssumedReadOnly()) { 2799 if (!MemBehaviorAA.isKnownReadOnly()) 2800 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 2801 return true; 2802 } 2803 return false; 2804 } 2805 }; 2806 2807 struct AAIsDeadFloating : public AAIsDeadValueImpl { 2808 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 2809 : AAIsDeadValueImpl(IRP, A) {} 2810 2811 /// See AbstractAttribute::initialize(...). 2812 void initialize(Attributor &A) override { 2813 if (isa<UndefValue>(getAssociatedValue())) { 2814 indicatePessimisticFixpoint(); 2815 return; 2816 } 2817 2818 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2819 if (!isAssumedSideEffectFree(A, I)) 2820 indicatePessimisticFixpoint(); 2821 } 2822 2823 /// See AbstractAttribute::updateImpl(...). 2824 ChangeStatus updateImpl(Attributor &A) override { 2825 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 2826 if (!isAssumedSideEffectFree(A, I)) 2827 return indicatePessimisticFixpoint(); 2828 2829 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2830 return indicatePessimisticFixpoint(); 2831 return ChangeStatus::UNCHANGED; 2832 } 2833 2834 /// See AbstractAttribute::manifest(...). 2835 ChangeStatus manifest(Attributor &A) override { 2836 Value &V = getAssociatedValue(); 2837 if (auto *I = dyn_cast<Instruction>(&V)) { 2838 // If we get here we basically know the users are all dead. We check if 2839 // isAssumedSideEffectFree returns true here again because it might not be 2840 // the case and only the users are dead but the instruction (=call) is 2841 // still needed. 2842 if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) { 2843 A.deleteAfterManifest(*I); 2844 return ChangeStatus::CHANGED; 2845 } 2846 } 2847 if (V.use_empty()) 2848 return ChangeStatus::UNCHANGED; 2849 2850 bool UsedAssumedInformation = false; 2851 Optional<Constant *> C = 2852 A.getAssumedConstant(V, *this, UsedAssumedInformation); 2853 if (C.hasValue() && C.getValue()) 2854 return ChangeStatus::UNCHANGED; 2855 2856 // Replace the value with undef as it is dead but keep droppable uses around 2857 // as they provide information we don't want to give up on just yet. 2858 UndefValue &UV = *UndefValue::get(V.getType()); 2859 bool AnyChange = 2860 A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false); 2861 return AnyChange ? 
ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 2862 } 2863 2864 /// See AbstractAttribute::trackStatistics() 2865 void trackStatistics() const override { 2866 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 2867 } 2868 }; 2869 2870 struct AAIsDeadArgument : public AAIsDeadFloating { 2871 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 2872 : AAIsDeadFloating(IRP, A) {} 2873 2874 /// See AbstractAttribute::initialize(...). 2875 void initialize(Attributor &A) override { 2876 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 2877 indicatePessimisticFixpoint(); 2878 } 2879 2880 /// See AbstractAttribute::manifest(...). 2881 ChangeStatus manifest(Attributor &A) override { 2882 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 2883 Argument &Arg = *getAssociatedArgument(); 2884 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 2885 if (A.registerFunctionSignatureRewrite( 2886 Arg, /* ReplacementTypes */ {}, 2887 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 2888 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 2889 Arg.dropDroppableUses(); 2890 return ChangeStatus::CHANGED; 2891 } 2892 return Changed; 2893 } 2894 2895 /// See AbstractAttribute::trackStatistics() 2896 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 2897 }; 2898 2899 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 2900 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 2901 : AAIsDeadValueImpl(IRP, A) {} 2902 2903 /// See AbstractAttribute::initialize(...). 2904 void initialize(Attributor &A) override { 2905 if (isa<UndefValue>(getAssociatedValue())) 2906 indicatePessimisticFixpoint(); 2907 } 2908 2909 /// See AbstractAttribute::updateImpl(...). 2910 ChangeStatus updateImpl(Attributor &A) override { 2911 // TODO: Once we have call site specific value information we can provide 2912 // call site specific liveness information and then it makes 2913 // sense to specialize attributes for call sites arguments instead of 2914 // redirecting requests to the callee argument. 2915 Argument *Arg = getAssociatedArgument(); 2916 if (!Arg) 2917 return indicatePessimisticFixpoint(); 2918 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2919 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos); 2920 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 2921 } 2922 2923 /// See AbstractAttribute::manifest(...). 2924 ChangeStatus manifest(Attributor &A) override { 2925 CallBase &CB = cast<CallBase>(getAnchorValue()); 2926 Use &U = CB.getArgOperandUse(getArgNo()); 2927 assert(!isa<UndefValue>(U.get()) && 2928 "Expected undef values to be filtered out!"); 2929 UndefValue &UV = *UndefValue::get(U->getType()); 2930 if (A.changeUseAfterManifest(U, UV)) 2931 return ChangeStatus::CHANGED; 2932 return ChangeStatus::UNCHANGED; 2933 } 2934 2935 /// See AbstractAttribute::trackStatistics() 2936 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 2937 }; 2938 2939 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 2940 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 2941 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 2942 2943 /// See AAIsDead::isAssumedDead(). 2944 bool isAssumedDead() const override { 2945 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 2946 } 2947 2948 /// See AbstractAttribute::initialize(...). 
2949 void initialize(Attributor &A) override { 2950 if (isa<UndefValue>(getAssociatedValue())) { 2951 indicatePessimisticFixpoint(); 2952 return; 2953 } 2954 2955 // We track this separately as a secondary state. 2956 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 2957 } 2958 2959 /// See AbstractAttribute::updateImpl(...). 2960 ChangeStatus updateImpl(Attributor &A) override { 2961 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2962 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 2963 IsAssumedSideEffectFree = false; 2964 Changed = ChangeStatus::CHANGED; 2965 } 2966 2967 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 2968 return indicatePessimisticFixpoint(); 2969 return Changed; 2970 } 2971 2972 /// See AbstractAttribute::trackStatistics() 2973 void trackStatistics() const override { 2974 if (IsAssumedSideEffectFree) 2975 STATS_DECLTRACK_CSRET_ATTR(IsDead) 2976 else 2977 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 2978 } 2979 2980 /// See AbstractAttribute::getAsStr(). 2981 const std::string getAsStr() const override { 2982 return isAssumedDead() 2983 ? "assumed-dead" 2984 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 2985 } 2986 2987 private: 2988 bool IsAssumedSideEffectFree; 2989 }; 2990 2991 struct AAIsDeadReturned : public AAIsDeadValueImpl { 2992 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 2993 : AAIsDeadValueImpl(IRP, A) {} 2994 2995 /// See AbstractAttribute::updateImpl(...). 2996 ChangeStatus updateImpl(Attributor &A) override { 2997 2998 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 2999 {Instruction::Ret}); 3000 3001 auto PredForCallSite = [&](AbstractCallSite ACS) { 3002 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3003 return false; 3004 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3005 }; 3006 3007 bool AllCallSitesKnown; 3008 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3009 AllCallSitesKnown)) 3010 return indicatePessimisticFixpoint(); 3011 3012 return ChangeStatus::UNCHANGED; 3013 } 3014 3015 /// See AbstractAttribute::manifest(...). 3016 ChangeStatus manifest(Attributor &A) override { 3017 // TODO: Rewrite the signature to return void? 3018 bool AnyChange = false; 3019 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3020 auto RetInstPred = [&](Instruction &I) { 3021 ReturnInst &RI = cast<ReturnInst>(I); 3022 if (!isa<UndefValue>(RI.getReturnValue())) 3023 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3024 return true; 3025 }; 3026 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}); 3027 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3028 } 3029 3030 /// See AbstractAttribute::trackStatistics() 3031 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3032 }; 3033 3034 struct AAIsDeadFunction : public AAIsDead { 3035 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3036 3037 /// See AbstractAttribute::initialize(...). 3038 void initialize(Attributor &A) override { 3039 const Function *F = getAnchorScope(); 3040 if (F && !F->isDeclaration()) { 3041 // We only want to compute liveness once. If the function is not part of 3042 // the SCC, skip it. 3043 if (A.isRunOn(*const_cast<Function *>(F))) { 3044 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3045 assumeLive(A, F->getEntryBlock()); 3046 } else { 3047 indicatePessimisticFixpoint(); 3048 } 3049 } 3050 } 3051 3052 /// See AbstractAttribute::getAsStr(). 
3053 const std::string getAsStr() const override { 3054 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3055 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3056 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3057 std::to_string(KnownDeadEnds.size()) + "]"; 3058 } 3059 3060 /// See AbstractAttribute::manifest(...). 3061 ChangeStatus manifest(Attributor &A) override { 3062 assert(getState().isValidState() && 3063 "Attempted to manifest an invalid state!"); 3064 3065 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3066 Function &F = *getAnchorScope(); 3067 3068 if (AssumedLiveBlocks.empty()) { 3069 A.deleteAfterManifest(F); 3070 return ChangeStatus::CHANGED; 3071 } 3072 3073 // Flag to determine if we can change an invoke to a call assuming the 3074 // callee is nounwind. This is not possible if the personality of the 3075 // function allows to catch asynchronous exceptions. 3076 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3077 3078 KnownDeadEnds.set_union(ToBeExploredFrom); 3079 for (const Instruction *DeadEndI : KnownDeadEnds) { 3080 auto *CB = dyn_cast<CallBase>(DeadEndI); 3081 if (!CB) 3082 continue; 3083 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3084 *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true, 3085 DepClassTy::OPTIONAL); 3086 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3087 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3088 continue; 3089 3090 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3091 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3092 else 3093 A.changeToUnreachableAfterManifest( 3094 const_cast<Instruction *>(DeadEndI->getNextNode())); 3095 HasChanged = ChangeStatus::CHANGED; 3096 } 3097 3098 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3099 for (BasicBlock &BB : F) 3100 if (!AssumedLiveBlocks.count(&BB)) { 3101 A.deleteAfterManifest(BB); 3102 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3103 } 3104 3105 return HasChanged; 3106 } 3107 3108 /// See AbstractAttribute::updateImpl(...). 3109 ChangeStatus updateImpl(Attributor &A) override; 3110 3111 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3112 return !AssumedLiveEdges.count(std::make_pair(From, To)); 3113 } 3114 3115 /// See AbstractAttribute::trackStatistics() 3116 void trackStatistics() const override {} 3117 3118 /// Returns true if the function is assumed dead. 3119 bool isAssumedDead() const override { return false; } 3120 3121 /// See AAIsDead::isKnownDead(). 3122 bool isKnownDead() const override { return false; } 3123 3124 /// See AAIsDead::isAssumedDead(BasicBlock *). 3125 bool isAssumedDead(const BasicBlock *BB) const override { 3126 assert(BB->getParent() == getAnchorScope() && 3127 "BB must be in the same anchor scope function."); 3128 3129 if (!getAssumed()) 3130 return false; 3131 return !AssumedLiveBlocks.count(BB); 3132 } 3133 3134 /// See AAIsDead::isKnownDead(BasicBlock *). 3135 bool isKnownDead(const BasicBlock *BB) const override { 3136 return getKnown() && isAssumedDead(BB); 3137 } 3138 3139 /// See AAIsDead::isAssumed(Instruction *I). 3140 bool isAssumedDead(const Instruction *I) const override { 3141 assert(I->getParent()->getParent() == getAnchorScope() && 3142 "Instruction must be in the same anchor scope function."); 3143 3144 if (!getAssumed()) 3145 return false; 3146 3147 // If it is not in AssumedLiveBlocks then it for sure dead. 
3148 // Otherwise, it can still be after noreturn call in a live block. 3149 if (!AssumedLiveBlocks.count(I->getParent())) 3150 return true; 3151 3152 // If it is not after a liveness barrier it is live. 3153 const Instruction *PrevI = I->getPrevNode(); 3154 while (PrevI) { 3155 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3156 return true; 3157 PrevI = PrevI->getPrevNode(); 3158 } 3159 return false; 3160 } 3161 3162 /// See AAIsDead::isKnownDead(Instruction *I). 3163 bool isKnownDead(const Instruction *I) const override { 3164 return getKnown() && isAssumedDead(I); 3165 } 3166 3167 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3168 /// that internal function called from \p BB should now be looked at. 3169 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3170 if (!AssumedLiveBlocks.insert(&BB).second) 3171 return false; 3172 3173 // We assume that all of BB is (probably) live now and if there are calls to 3174 // internal functions we will assume that those are now live as well. This 3175 // is a performance optimization for blocks with calls to a lot of internal 3176 // functions. It can however cause dead functions to be treated as live. 3177 for (const Instruction &I : BB) 3178 if (const auto *CB = dyn_cast<CallBase>(&I)) 3179 if (const Function *F = CB->getCalledFunction()) 3180 if (F->hasLocalLinkage()) 3181 A.markLiveInternalFunction(*F); 3182 return true; 3183 } 3184 3185 /// Collection of instructions that need to be explored again, e.g., we 3186 /// did assume they do not transfer control to (one of their) successors. 3187 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3188 3189 /// Collection of instructions that are known to not transfer control. 3190 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3191 3192 /// Collection of all assumed live edges 3193 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges; 3194 3195 /// Collection of all assumed live BasicBlocks. 3196 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3197 }; 3198 3199 static bool 3200 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3201 AbstractAttribute &AA, 3202 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3203 const IRPosition &IPos = IRPosition::callsite_function(CB); 3204 3205 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3206 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3207 if (NoReturnAA.isAssumedNoReturn()) 3208 return !NoReturnAA.isKnownNoReturn(); 3209 if (CB.isTerminator()) 3210 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3211 else 3212 AliveSuccessors.push_back(CB.getNextNode()); 3213 return false; 3214 } 3215 3216 static bool 3217 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3218 AbstractAttribute &AA, 3219 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3220 bool UsedAssumedInformation = 3221 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3222 3223 // First, determine if we can change an invoke to a call assuming the 3224 // callee is nounwind. This is not possible if the personality of the 3225 // function allows to catch asynchronous exceptions. 
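  // With an asynchronous EH personality (e.g., SEH) the unwind edge can be
  // taken even if the callee itself never unwinds, say on a hardware fault,
  // so the unwind destination has to stay alive in that case.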
3226 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3227 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3228 } else { 3229 const IRPosition &IPos = IRPosition::callsite_function(II); 3230 const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>( 3231 AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 3232 if (AANoUnw.isAssumedNoUnwind()) { 3233 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3234 } else { 3235 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3236 } 3237 } 3238 return UsedAssumedInformation; 3239 } 3240 3241 static bool 3242 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3243 AbstractAttribute &AA, 3244 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3245 bool UsedAssumedInformation = false; 3246 if (BI.getNumSuccessors() == 1) { 3247 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3248 } else { 3249 Optional<ConstantInt *> CI = getAssumedConstantInt( 3250 A, *BI.getCondition(), AA, UsedAssumedInformation); 3251 if (!CI.hasValue()) { 3252 // No value yet, assume both edges are dead. 3253 } else if (CI.getValue()) { 3254 const BasicBlock *SuccBB = 3255 BI.getSuccessor(1 - CI.getValue()->getZExtValue()); 3256 AliveSuccessors.push_back(&SuccBB->front()); 3257 } else { 3258 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3259 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3260 UsedAssumedInformation = false; 3261 } 3262 } 3263 return UsedAssumedInformation; 3264 } 3265 3266 static bool 3267 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3268 AbstractAttribute &AA, 3269 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3270 bool UsedAssumedInformation = false; 3271 Optional<ConstantInt *> CI = 3272 getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation); 3273 if (!CI.hasValue()) { 3274 // No value yet, assume all edges are dead. 3275 } else if (CI.getValue()) { 3276 for (auto &CaseIt : SI.cases()) { 3277 if (CaseIt.getCaseValue() == CI.getValue()) { 3278 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3279 return UsedAssumedInformation; 3280 } 3281 } 3282 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3283 return UsedAssumedInformation; 3284 } else { 3285 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3286 AliveSuccessors.push_back(&SuccBB->front()); 3287 } 3288 return UsedAssumedInformation; 3289 } 3290 3291 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3292 ChangeStatus Change = ChangeStatus::UNCHANGED; 3293 3294 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3295 << getAnchorScope()->size() << "] BBs and " 3296 << ToBeExploredFrom.size() << " exploration points and " 3297 << KnownDeadEnds.size() << " known dead ends\n"); 3298 3299 // Copy and clear the list of instructions we need to explore from. It is 3300 // refilled with instructions the next update has to look at. 3301 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3302 ToBeExploredFrom.end()); 3303 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3304 3305 SmallVector<const Instruction *, 8> AliveSuccessors; 3306 while (!Worklist.empty()) { 3307 const Instruction *I = Worklist.pop_back_val(); 3308 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3309 3310 // Fast forward for uninteresting instructions. We could look for UB here 3311 // though. 
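    // Only calls and terminators can stop the forward walk through a block;
    // any other instruction, e.g., a plain `add`, unconditionally falls
    // through to its successor instruction.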
    while (!I->isTerminator() && !isa<CallBase>(I)) {
      Change = ChangeStatus::CHANGED;
      I = I->getNextNode();
    }

    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Record the assumed live edge.
        AssumedLiveEdges.insert(
            std::make_pair(I->getParent(), AliveSuccessor->getParent()));
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finish the exploration, (2) not
  // discover any non-trivial dead end, and (3) not rule any unreachable code
  // dead.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
3402 llvm_unreachable("Abstract attributes for liveness are not " 3403 "supported for call sites yet!"); 3404 } 3405 3406 /// See AbstractAttribute::updateImpl(...). 3407 ChangeStatus updateImpl(Attributor &A) override { 3408 return indicatePessimisticFixpoint(); 3409 } 3410 3411 /// See AbstractAttribute::trackStatistics() 3412 void trackStatistics() const override {} 3413 }; 3414 3415 /// -------------------- Dereferenceable Argument Attribute -------------------- 3416 3417 template <> 3418 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 3419 const DerefState &R) { 3420 ChangeStatus CS0 = 3421 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 3422 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 3423 return CS0 | CS1; 3424 } 3425 3426 struct AADereferenceableImpl : AADereferenceable { 3427 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 3428 : AADereferenceable(IRP, A) {} 3429 using StateType = DerefState; 3430 3431 /// See AbstractAttribute::initialize(...). 3432 void initialize(Attributor &A) override { 3433 SmallVector<Attribute, 4> Attrs; 3434 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 3435 Attrs, /* IgnoreSubsumingPositions */ false, &A); 3436 for (const Attribute &Attr : Attrs) 3437 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 3438 3439 const IRPosition &IRP = this->getIRPosition(); 3440 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, 3441 /* TrackDependence */ false); 3442 3443 bool CanBeNull; 3444 takeKnownDerefBytesMaximum( 3445 IRP.getAssociatedValue().getPointerDereferenceableBytes( 3446 A.getDataLayout(), CanBeNull)); 3447 3448 bool IsFnInterface = IRP.isFnInterfaceKind(); 3449 Function *FnScope = IRP.getAnchorScope(); 3450 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 3451 indicatePessimisticFixpoint(); 3452 return; 3453 } 3454 3455 if (Instruction *CtxI = getCtxI()) 3456 followUsesInMBEC(*this, A, getState(), *CtxI); 3457 } 3458 3459 /// See AbstractAttribute::getState() 3460 /// { 3461 StateType &getState() override { return *this; } 3462 const StateType &getState() const override { return *this; } 3463 /// } 3464 3465 /// Helper function for collecting accessed bytes in must-be-executed-context 3466 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 3467 DerefState &State) { 3468 const Value *UseV = U->get(); 3469 if (!UseV->getType()->isPointerTy()) 3470 return; 3471 3472 Type *PtrTy = UseV->getType(); 3473 const DataLayout &DL = A.getDataLayout(); 3474 int64_t Offset; 3475 if (const Value *Base = getBasePointerOfAccessPointerOperand( 3476 I, Offset, DL, /*AllowNonInbounds*/ true)) { 3477 if (Base == &getAssociatedValue() && 3478 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 3479 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 3480 State.addAccessedBytes(Offset, Size); 3481 } 3482 } 3483 return; 3484 } 3485 3486 /// See followUsesInMBEC 3487 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3488 AADereferenceable::StateType &State) { 3489 bool IsNonNull = false; 3490 bool TrackUse = false; 3491 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 3492 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 3493 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 3494 << " for instruction " << *I << "\n"); 3495 3496 addAccessedBytesForUse(A, U, I, State); 3497 State.takeKnownDerefBytesMaximum(DerefBytes); 3498 return 
TrackUse; 3499 } 3500 3501 /// See AbstractAttribute::manifest(...). 3502 ChangeStatus manifest(Attributor &A) override { 3503 ChangeStatus Change = AADereferenceable::manifest(A); 3504 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 3505 removeAttrs({Attribute::DereferenceableOrNull}); 3506 return ChangeStatus::CHANGED; 3507 } 3508 return Change; 3509 } 3510 3511 void getDeducedAttributes(LLVMContext &Ctx, 3512 SmallVectorImpl<Attribute> &Attrs) const override { 3513 // TODO: Add *_globally support 3514 if (isAssumedNonNull()) 3515 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 3516 Ctx, getAssumedDereferenceableBytes())); 3517 else 3518 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 3519 Ctx, getAssumedDereferenceableBytes())); 3520 } 3521 3522 /// See AbstractAttribute::getAsStr(). 3523 const std::string getAsStr() const override { 3524 if (!getAssumedDereferenceableBytes()) 3525 return "unknown-dereferenceable"; 3526 return std::string("dereferenceable") + 3527 (isAssumedNonNull() ? "" : "_or_null") + 3528 (isAssumedGlobal() ? "_globally" : "") + "<" + 3529 std::to_string(getKnownDereferenceableBytes()) + "-" + 3530 std::to_string(getAssumedDereferenceableBytes()) + ">"; 3531 } 3532 }; 3533 3534 /// Dereferenceable attribute for a floating value. 3535 struct AADereferenceableFloating : AADereferenceableImpl { 3536 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 3537 : AADereferenceableImpl(IRP, A) {} 3538 3539 /// See AbstractAttribute::updateImpl(...). 3540 ChangeStatus updateImpl(Attributor &A) override { 3541 const DataLayout &DL = A.getDataLayout(); 3542 3543 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 3544 bool Stripped) -> bool { 3545 unsigned IdxWidth = 3546 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 3547 APInt Offset(IdxWidth, 0); 3548 const Value *Base = 3549 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false); 3550 3551 const auto &AA = 3552 A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base)); 3553 int64_t DerefBytes = 0; 3554 if (!Stripped && this == &AA) { 3555 // Use IR information if we did not strip anything. 3556 // TODO: track globally. 3557 bool CanBeNull; 3558 DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); 3559 T.GlobalState.indicatePessimisticFixpoint(); 3560 } else { 3561 const DerefState &DS = AA.getState(); 3562 DerefBytes = DS.DerefBytesState.getAssumed(); 3563 T.GlobalState &= DS.GlobalState; 3564 } 3565 3566 // For now we do not try to "increase" dereferenceability due to negative 3567 // indices as we first have to come up with code to deal with loops and 3568 // for overflows of the dereferenceable bytes. 3569 int64_t OffsetSExt = Offset.getSExtValue(); 3570 if (OffsetSExt < 0) 3571 OffsetSExt = 0; 3572 3573 T.takeAssumedDerefBytesMinimum( 3574 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3575 3576 if (this == &AA) { 3577 if (!Stripped) { 3578 // If nothing was stripped IR information is all we got. 3579 T.takeKnownDerefBytesMaximum( 3580 std::max(int64_t(0), DerefBytes - OffsetSExt)); 3581 T.indicatePessimisticFixpoint(); 3582 } else if (OffsetSExt > 0) { 3583 // If something was stripped but there is circular reasoning we look 3584 // for the offset. If it is positive we basically decrease the 3585 // dereferenceable bytes in a circluar loop now, which will simply 3586 // drive them down to the known value in a very slow way which we 3587 // can accelerate. 
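          // Pinning the assumed bytes to the known value right away is that
          // acceleration.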
3588 T.indicatePessimisticFixpoint(); 3589 } 3590 } 3591 3592 return T.isValidState(); 3593 }; 3594 3595 DerefState T; 3596 if (!genericValueTraversal<AADereferenceable, DerefState>( 3597 A, getIRPosition(), *this, T, VisitValueCB, getCtxI())) 3598 return indicatePessimisticFixpoint(); 3599 3600 return clampStateAndIndicateChange(getState(), T); 3601 } 3602 3603 /// See AbstractAttribute::trackStatistics() 3604 void trackStatistics() const override { 3605 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 3606 } 3607 }; 3608 3609 /// Dereferenceable attribute for a return value. 3610 struct AADereferenceableReturned final 3611 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 3612 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 3613 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 3614 IRP, A) {} 3615 3616 /// See AbstractAttribute::trackStatistics() 3617 void trackStatistics() const override { 3618 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 3619 } 3620 }; 3621 3622 /// Dereferenceable attribute for an argument 3623 struct AADereferenceableArgument final 3624 : AAArgumentFromCallSiteArguments<AADereferenceable, 3625 AADereferenceableImpl> { 3626 using Base = 3627 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 3628 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 3629 : Base(IRP, A) {} 3630 3631 /// See AbstractAttribute::trackStatistics() 3632 void trackStatistics() const override { 3633 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 3634 } 3635 }; 3636 3637 /// Dereferenceable attribute for a call site argument. 3638 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 3639 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 3640 : AADereferenceableFloating(IRP, A) {} 3641 3642 /// See AbstractAttribute::trackStatistics() 3643 void trackStatistics() const override { 3644 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 3645 } 3646 }; 3647 3648 /// Dereferenceable attribute deduction for a call site return value. 3649 struct AADereferenceableCallSiteReturned final 3650 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 3651 using Base = 3652 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 3653 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 3654 : Base(IRP, A) {} 3655 3656 /// See AbstractAttribute::trackStatistics() 3657 void trackStatistics() const override { 3658 STATS_DECLTRACK_CS_ATTR(dereferenceable); 3659 } 3660 }; 3661 3662 // ------------------------ Align Argument Attribute ------------------------ 3663 3664 static unsigned getKnownAlignForUse(Attributor &A, 3665 AbstractAttribute &QueryingAA, 3666 Value &AssociatedValue, const Use *U, 3667 const Instruction *I, bool &TrackUse) { 3668 // We need to follow common pointer manipulation uses to the accesses they 3669 // feed into. 3670 if (isa<CastInst>(I)) { 3671 // Follow all but ptr2int casts. 
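    // E.g., bitcasts and addrspacecasts still feed the pointer into accesses
    // we can inspect, whereas a ptrtoint result is not followed any further.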
3672 TrackUse = !isa<PtrToIntInst>(I); 3673 return 0; 3674 } 3675 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3676 if (GEP->hasAllConstantIndices()) { 3677 TrackUse = true; 3678 return 0; 3679 } 3680 } 3681 3682 MaybeAlign MA; 3683 if (const auto *CB = dyn_cast<CallBase>(I)) { 3684 if (CB->isBundleOperand(U) || CB->isCallee(U)) 3685 return 0; 3686 3687 unsigned ArgNo = CB->getArgOperandNo(U); 3688 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 3689 // As long as we only use known information there is no need to track 3690 // dependences here. 3691 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, 3692 /* TrackDependence */ false); 3693 MA = MaybeAlign(AlignAA.getKnownAlign()); 3694 } 3695 3696 const DataLayout &DL = A.getDataLayout(); 3697 const Value *UseV = U->get(); 3698 if (auto *SI = dyn_cast<StoreInst>(I)) { 3699 if (SI->getPointerOperand() == UseV) 3700 MA = SI->getAlign(); 3701 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 3702 if (LI->getPointerOperand() == UseV) 3703 MA = LI->getAlign(); 3704 } 3705 3706 if (!MA || *MA <= 1) 3707 return 0; 3708 3709 unsigned Alignment = MA->value(); 3710 int64_t Offset; 3711 3712 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 3713 if (Base == &AssociatedValue) { 3714 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 3715 // So we can say that the maximum power of two which is a divisor of 3716 // gcd(Offset, Alignment) is an alignment. 3717 3718 uint32_t gcd = 3719 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 3720 Alignment = llvm::PowerOf2Floor(gcd); 3721 } 3722 } 3723 3724 return Alignment; 3725 } 3726 3727 struct AAAlignImpl : AAAlign { 3728 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 3729 3730 /// See AbstractAttribute::initialize(...). 3731 void initialize(Attributor &A) override { 3732 SmallVector<Attribute, 4> Attrs; 3733 getAttrs({Attribute::Alignment}, Attrs); 3734 for (const Attribute &Attr : Attrs) 3735 takeKnownMaximum(Attr.getValueAsInt()); 3736 3737 Value &V = getAssociatedValue(); 3738 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 3739 // use of the function pointer. This was caused by D73131. We want to 3740 // avoid this for function pointers especially because we iterate 3741 // their uses and int2ptr is not handled. It is not a correctness 3742 // problem though! 3743 if (!V.getType()->getPointerElementType()->isFunctionTy()) 3744 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 3745 3746 if (getIRPosition().isFnInterfaceKind() && 3747 (!getAnchorScope() || 3748 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 3749 indicatePessimisticFixpoint(); 3750 return; 3751 } 3752 3753 if (Instruction *CtxI = getCtxI()) 3754 followUsesInMBEC(*this, A, getState(), *CtxI); 3755 } 3756 3757 /// See AbstractAttribute::manifest(...). 3758 ChangeStatus manifest(Attributor &A) override { 3759 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 3760 3761 // Check for users that allow alignment annotations. 
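    // Loads and stores that use the associated value as pointer operand carry
    // their own `align` annotation, which we may raise below if we deduced a
    // larger alignment.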
3762 Value &AssociatedValue = getAssociatedValue(); 3763 for (const Use &U : AssociatedValue.uses()) { 3764 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 3765 if (SI->getPointerOperand() == &AssociatedValue) 3766 if (SI->getAlignment() < getAssumedAlign()) { 3767 STATS_DECLTRACK(AAAlign, Store, 3768 "Number of times alignment added to a store"); 3769 SI->setAlignment(Align(getAssumedAlign())); 3770 LoadStoreChanged = ChangeStatus::CHANGED; 3771 } 3772 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 3773 if (LI->getPointerOperand() == &AssociatedValue) 3774 if (LI->getAlignment() < getAssumedAlign()) { 3775 LI->setAlignment(Align(getAssumedAlign())); 3776 STATS_DECLTRACK(AAAlign, Load, 3777 "Number of times alignment added to a load"); 3778 LoadStoreChanged = ChangeStatus::CHANGED; 3779 } 3780 } 3781 } 3782 3783 ChangeStatus Changed = AAAlign::manifest(A); 3784 3785 Align InheritAlign = 3786 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3787 if (InheritAlign >= getAssumedAlign()) 3788 return LoadStoreChanged; 3789 return Changed | LoadStoreChanged; 3790 } 3791 3792 // TODO: Provide a helper to determine the implied ABI alignment and check in 3793 // the existing manifest method and a new one for AAAlignImpl that value 3794 // to avoid making the alignment explicit if it did not improve. 3795 3796 /// See AbstractAttribute::getDeducedAttributes 3797 virtual void 3798 getDeducedAttributes(LLVMContext &Ctx, 3799 SmallVectorImpl<Attribute> &Attrs) const override { 3800 if (getAssumedAlign() > 1) 3801 Attrs.emplace_back( 3802 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 3803 } 3804 3805 /// See followUsesInMBEC 3806 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 3807 AAAlign::StateType &State) { 3808 bool TrackUse = false; 3809 3810 unsigned int KnownAlign = 3811 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 3812 State.takeKnownMaximum(KnownAlign); 3813 3814 return TrackUse; 3815 } 3816 3817 /// See AbstractAttribute::getAsStr(). 3818 const std::string getAsStr() const override { 3819 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 3820 "-" + std::to_string(getAssumedAlign()) + ">") 3821 : "unknown-align"; 3822 } 3823 }; 3824 3825 /// Align attribute for a floating value. 3826 struct AAAlignFloating : AAAlignImpl { 3827 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 3828 3829 /// See AbstractAttribute::updateImpl(...). 3830 ChangeStatus updateImpl(Attributor &A) override { 3831 const DataLayout &DL = A.getDataLayout(); 3832 3833 auto VisitValueCB = [&](Value &V, const Instruction *, 3834 AAAlign::StateType &T, bool Stripped) -> bool { 3835 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V)); 3836 if (!Stripped && this == &AA) { 3837 // Use only IR information if we did not strip anything. 3838 Align PA = V.getPointerAlignment(DL); 3839 T.takeKnownMaximum(PA.value()); 3840 T.indicatePessimisticFixpoint(); 3841 } else { 3842 // Use abstract attribute information. 3843 const AAAlign::StateType &DS = AA.getState(); 3844 T ^= DS; 3845 } 3846 return T.isValidState(); 3847 }; 3848 3849 StateType T; 3850 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T, 3851 VisitValueCB, getCtxI())) 3852 return indicatePessimisticFixpoint(); 3853 3854 // TODO: If we know we visited all incoming values, thus no are assumed 3855 // dead, we can take the known information from the state T. 
3856 return clampStateAndIndicateChange(getState(), T); 3857 } 3858 3859 /// See AbstractAttribute::trackStatistics() 3860 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 3861 }; 3862 3863 /// Align attribute for function return value. 3864 struct AAAlignReturned final 3865 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 3866 AAAlignReturned(const IRPosition &IRP, Attributor &A) 3867 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {} 3868 3869 /// See AbstractAttribute::trackStatistics() 3870 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 3871 }; 3872 3873 /// Align attribute for function argument. 3874 struct AAAlignArgument final 3875 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 3876 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 3877 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3878 3879 /// See AbstractAttribute::manifest(...). 3880 ChangeStatus manifest(Attributor &A) override { 3881 // If the associated argument is involved in a must-tail call we give up 3882 // because we would need to keep the argument alignments of caller and 3883 // callee in-sync. Just does not seem worth the trouble right now. 3884 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 3885 return ChangeStatus::UNCHANGED; 3886 return Base::manifest(A); 3887 } 3888 3889 /// See AbstractAttribute::trackStatistics() 3890 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 3891 }; 3892 3893 struct AAAlignCallSiteArgument final : AAAlignFloating { 3894 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 3895 : AAAlignFloating(IRP, A) {} 3896 3897 /// See AbstractAttribute::manifest(...). 3898 ChangeStatus manifest(Attributor &A) override { 3899 // If the associated argument is involved in a must-tail call we give up 3900 // because we would need to keep the argument alignments of caller and 3901 // callee in-sync. Just does not seem worth the trouble right now. 3902 if (Argument *Arg = getAssociatedArgument()) 3903 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 3904 return ChangeStatus::UNCHANGED; 3905 ChangeStatus Changed = AAAlignImpl::manifest(A); 3906 Align InheritAlign = 3907 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 3908 if (InheritAlign >= getAssumedAlign()) 3909 Changed = ChangeStatus::UNCHANGED; 3910 return Changed; 3911 } 3912 3913 /// See AbstractAttribute::updateImpl(Attributor &A). 3914 ChangeStatus updateImpl(Attributor &A) override { 3915 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 3916 if (Argument *Arg = getAssociatedArgument()) { 3917 // We only take known information from the argument 3918 // so we do not need to track a dependence. 3919 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 3920 *this, IRPosition::argument(*Arg), /* TrackDependence */ false); 3921 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 3922 } 3923 return Changed; 3924 } 3925 3926 /// See AbstractAttribute::trackStatistics() 3927 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 3928 }; 3929 3930 /// Align attribute deduction for a call site return value. 3931 struct AAAlignCallSiteReturned final 3932 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 3933 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 3934 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 3935 : Base(IRP, A) {} 3936 3937 /// See AbstractAttribute::initialize(...). 
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};

/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturn::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret}))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
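    // There is no object behind a null pointer in address space 0, so nothing
    // can be captured through it.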
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (getArgNo() >= 0) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it
    // can however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
        DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
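    // E.g., for a recursive call `f(p)` inside `f(int *p)`, the call site
    // argument is optimistically assumed no-capture, which allows `p` itself
    // to become no-capture once a fixpoint is reached.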
4199 auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos); 4200 if (ArgNoCaptureAA.isAssumedNoCapture()) 4201 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4202 /* Return */ false); 4203 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4204 addPotentialCopy(*CB); 4205 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4206 /* Return */ false); 4207 } 4208 4209 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4210 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4211 /* Return */ true); 4212 } 4213 4214 /// Register \p CS as potential copy of the value we are checking. 4215 void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); } 4216 4217 /// See CaptureTracker::shouldExplore(...). 4218 bool shouldExplore(const Use *U) override { 4219 // Check liveness and ignore droppable users. 4220 return !U->getUser()->isDroppable() && 4221 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA); 4222 } 4223 4224 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4225 /// \p CapturedInRet, then return the appropriate value for use in the 4226 /// CaptureTracker::captured() interface. 4227 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4228 bool CapturedInRet) { 4229 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4230 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4231 if (CapturedInMem) 4232 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4233 if (CapturedInInt) 4234 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4235 if (CapturedInRet) 4236 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4237 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4238 } 4239 4240 private: 4241 /// The attributor providing in-flight abstract attributes. 4242 Attributor &A; 4243 4244 /// The abstract attribute currently updated. 4245 AANoCapture &NoCaptureAA; 4246 4247 /// The abstract liveness state. 4248 const AAIsDead &IsDeadAA; 4249 4250 /// The state currently updated. 4251 AANoCapture::StateType &State; 4252 4253 /// Set of potential copies of the tracked value. 4254 SmallVectorImpl<const Value *> &PotentialCopies; 4255 4256 /// Global counter to limit the number of explored uses. 4257 unsigned &RemainingUsesToExplore; 4258 }; 4259 4260 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4261 const IRPosition &IRP = getIRPosition(); 4262 const Value *V = 4263 getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue(); 4264 if (!V) 4265 return indicatePessimisticFixpoint(); 4266 4267 const Function *F = 4268 getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4269 assert(F && "Expected a function!"); 4270 const IRPosition &FnPos = IRPosition::function(*F); 4271 const auto &IsDeadAA = 4272 A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false); 4273 4274 AANoCapture::StateType T; 4275 4276 // Readonly means we cannot capture through memory. 4277 const auto &FnMemAA = 4278 A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false); 4279 if (FnMemAA.isAssumedReadOnly()) { 4280 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4281 if (FnMemAA.isKnownReadOnly()) 4282 addKnownBits(NOT_CAPTURED_IN_MEM); 4283 else 4284 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4285 } 4286 4287 // Make sure all returned values are different than the underlying value. 
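  // Otherwise the associated value could be communicated back to the caller
  // through the return and would have to be treated as captured in the return.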
4288 // TODO: we could do this in a more sophisticated way inside 4289 // AAReturnedValues, e.g., track all values that escape through returns 4290 // directly somehow. 4291 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4292 bool SeenConstant = false; 4293 for (auto &It : RVAA.returned_values()) { 4294 if (isa<Constant>(It.first)) { 4295 if (SeenConstant) 4296 return false; 4297 SeenConstant = true; 4298 } else if (!isa<Argument>(It.first) || 4299 It.first == getAssociatedArgument()) 4300 return false; 4301 } 4302 return true; 4303 }; 4304 4305 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 4306 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 4307 if (NoUnwindAA.isAssumedNoUnwind()) { 4308 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4309 const AAReturnedValues *RVAA = 4310 IsVoidTy ? nullptr 4311 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4312 /* TrackDependence */ true, 4313 DepClassTy::OPTIONAL); 4314 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4315 T.addKnownBits(NOT_CAPTURED_IN_RET); 4316 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4317 return ChangeStatus::UNCHANGED; 4318 if (NoUnwindAA.isKnownNoUnwind() && 4319 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4320 addKnownBits(NOT_CAPTURED_IN_RET); 4321 if (isKnown(NOT_CAPTURED_IN_MEM)) 4322 return indicateOptimisticFixpoint(); 4323 } 4324 } 4325 } 4326 4327 // Use the CaptureTracker interface and logic with the specialized tracker, 4328 // defined in AACaptureUseTracker, that can look at in-flight abstract 4329 // attributes and directly updates the assumed state. 4330 SmallVector<const Value *, 4> PotentialCopies; 4331 unsigned RemainingUsesToExplore = 4332 getDefaultMaxUsesToExploreForCaptureTracking(); 4333 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4334 RemainingUsesToExplore); 4335 4336 // Check all potential copies of the associated value until we can assume 4337 // none will be captured or we have to assume at least one might be. 4338 unsigned Idx = 0; 4339 PotentialCopies.push_back(V); 4340 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4341 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4342 4343 AANoCapture::StateType &S = getState(); 4344 auto Assumed = S.getAssumed(); 4345 S.intersectAssumedBits(T.getAssumed()); 4346 if (!isAssumedNoCaptureMaybeReturned()) 4347 return indicatePessimisticFixpoint(); 4348 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 4349 : ChangeStatus::CHANGED; 4350 } 4351 4352 /// NoCapture attribute for function arguments. 4353 struct AANoCaptureArgument final : AANoCaptureImpl { 4354 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 4355 : AANoCaptureImpl(IRP, A) {} 4356 4357 /// See AbstractAttribute::trackStatistics() 4358 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 4359 }; 4360 4361 /// NoCapture attribute for call site arguments. 4362 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 4363 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 4364 : AANoCaptureImpl(IRP, A) {} 4365 4366 /// See AbstractAttribute::initialize(...). 4367 void initialize(Attributor &A) override { 4368 if (Argument *Arg = getAssociatedArgument()) 4369 if (Arg->hasByValAttr()) 4370 indicateOptimisticFixpoint(); 4371 AANoCaptureImpl::initialize(A); 4372 } 4373 4374 /// See AbstractAttribute::updateImpl(...). 
4375 ChangeStatus updateImpl(Attributor &A) override { 4376 // TODO: Once we have call site specific value information we can provide 4377 // call site specific liveness information and then it makes 4378 // sense to specialize attributes for call sites arguments instead of 4379 // redirecting requests to the callee argument. 4380 Argument *Arg = getAssociatedArgument(); 4381 if (!Arg) 4382 return indicatePessimisticFixpoint(); 4383 const IRPosition &ArgPos = IRPosition::argument(*Arg); 4384 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos); 4385 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 4386 } 4387 4388 /// See AbstractAttribute::trackStatistics() 4389 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 4390 }; 4391 4392 /// NoCapture attribute for floating values. 4393 struct AANoCaptureFloating final : AANoCaptureImpl { 4394 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 4395 : AANoCaptureImpl(IRP, A) {} 4396 4397 /// See AbstractAttribute::trackStatistics() 4398 void trackStatistics() const override { 4399 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 4400 } 4401 }; 4402 4403 /// NoCapture attribute for function return value. 4404 struct AANoCaptureReturned final : AANoCaptureImpl { 4405 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 4406 : AANoCaptureImpl(IRP, A) { 4407 llvm_unreachable("NoCapture is not applicable to function returns!"); 4408 } 4409 4410 /// See AbstractAttribute::initialize(...). 4411 void initialize(Attributor &A) override { 4412 llvm_unreachable("NoCapture is not applicable to function returns!"); 4413 } 4414 4415 /// See AbstractAttribute::updateImpl(...). 4416 ChangeStatus updateImpl(Attributor &A) override { 4417 llvm_unreachable("NoCapture is not applicable to function returns!"); 4418 } 4419 4420 /// See AbstractAttribute::trackStatistics() 4421 void trackStatistics() const override {} 4422 }; 4423 4424 /// NoCapture attribute deduction for a call site return value. 4425 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 4426 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 4427 : AANoCaptureImpl(IRP, A) {} 4428 4429 /// See AbstractAttribute::trackStatistics() 4430 void trackStatistics() const override { 4431 STATS_DECLTRACK_CSRET_ATTR(nocapture) 4432 } 4433 }; 4434 4435 /// ------------------ Value Simplify Attribute ---------------------------- 4436 struct AAValueSimplifyImpl : AAValueSimplify { 4437 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 4438 : AAValueSimplify(IRP, A) {} 4439 4440 /// See AbstractAttribute::initialize(...). 4441 void initialize(Attributor &A) override { 4442 if (getAssociatedValue().getType()->isVoidTy()) 4443 indicatePessimisticFixpoint(); 4444 } 4445 4446 /// See AbstractAttribute::getAsStr(). 4447 const std::string getAsStr() const override { 4448 return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple") 4449 : "not-simple"; 4450 } 4451 4452 /// See AbstractAttribute::trackStatistics() 4453 void trackStatistics() const override {} 4454 4455 /// See AAValueSimplify::getAssumedSimplifiedValue() 4456 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 4457 if (!getAssumed()) 4458 return const_cast<Value *>(&getAssociatedValue()); 4459 return SimplifiedAssociatedValue; 4460 } 4461 4462 /// Helper function for querying AAValueSimplify and updating candicate. 
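  /// Returns false if \p QueryingValue cannot be simplified to a single value
  /// or if its simplified value conflicts with \p AccumulatedSimplifiedValue.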
4463 /// \param QueryingValue Value trying to unify with SimplifiedValue 4464 /// \param AccumulatedSimplifiedValue Current simplification result. 4465 static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 4466 Value &QueryingValue, 4467 Optional<Value *> &AccumulatedSimplifiedValue) { 4468 // FIXME: Add a typecast support. 4469 4470 auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>( 4471 QueryingAA, IRPosition::value(QueryingValue)); 4472 4473 Optional<Value *> QueryingValueSimplified = 4474 ValueSimplifyAA.getAssumedSimplifiedValue(A); 4475 4476 if (!QueryingValueSimplified.hasValue()) 4477 return true; 4478 4479 if (!QueryingValueSimplified.getValue()) 4480 return false; 4481 4482 Value &QueryingValueSimplifiedUnwrapped = 4483 *QueryingValueSimplified.getValue(); 4484 4485 if (AccumulatedSimplifiedValue.hasValue() && 4486 !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) && 4487 !isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4488 return AccumulatedSimplifiedValue == QueryingValueSimplified; 4489 if (AccumulatedSimplifiedValue.hasValue() && 4490 isa<UndefValue>(QueryingValueSimplifiedUnwrapped)) 4491 return true; 4492 4493 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue 4494 << " is assumed to be " 4495 << QueryingValueSimplifiedUnwrapped << "\n"); 4496 4497 AccumulatedSimplifiedValue = QueryingValueSimplified; 4498 return true; 4499 } 4500 4501 /// Returns a candidate is found or not 4502 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { 4503 if (!getAssociatedValue().getType()->isIntegerTy()) 4504 return false; 4505 4506 const auto &AA = 4507 A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false); 4508 4509 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A); 4510 4511 if (!COpt.hasValue()) { 4512 SimplifiedAssociatedValue = llvm::None; 4513 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 4514 return true; 4515 } 4516 if (auto *C = COpt.getValue()) { 4517 SimplifiedAssociatedValue = C; 4518 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 4519 return true; 4520 } 4521 return false; 4522 } 4523 4524 bool askSimplifiedValueForOtherAAs(Attributor &A) { 4525 if (askSimplifiedValueFor<AAValueConstantRange>(A)) 4526 return true; 4527 if (askSimplifiedValueFor<AAPotentialValues>(A)) 4528 return true; 4529 return false; 4530 } 4531 4532 /// See AbstractAttribute::manifest(...). 4533 ChangeStatus manifest(Attributor &A) override { 4534 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4535 4536 if (SimplifiedAssociatedValue.hasValue() && 4537 !SimplifiedAssociatedValue.getValue()) 4538 return Changed; 4539 4540 Value &V = getAssociatedValue(); 4541 auto *C = SimplifiedAssociatedValue.hasValue() 4542 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4543 : UndefValue::get(V.getType()); 4544 if (C) { 4545 // We can replace the AssociatedValue with the constant. 4546 if (!V.user_empty() && &V != C && V.getType() == C->getType()) { 4547 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C 4548 << " :: " << *this << "\n"); 4549 if (A.changeValueAfterManifest(V, *C)) 4550 Changed = ChangeStatus::CHANGED; 4551 } 4552 } 4553 4554 return Changed | AAValueSimplify::manifest(A); 4555 } 4556 4557 /// See AbstractState::indicatePessimisticFixpoint(...). 4558 ChangeStatus indicatePessimisticFixpoint() override { 4559 // NOTE: Associated value will be returned in a pessimistic fixpoint and is 4560 // regarded as known. That's why`indicateOptimisticFixpoint` is called. 
    SimplifiedAssociatedValue = &getAssociatedValue();
    indicateOptimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
  Optional<Value *> SimplifiedAssociatedValue;
};

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAnchorScope() || getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
                 Attribute::StructRet, Attribute::Nest},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();

    // FIXME: This is a hack to prevent us from propagating function pointers
    //        in the new pass manager CGSCC pass as it creates call edges the
    //        CallGraphUpdater cannot handle yet.
    Value &V = getAssociatedValue();
    if (V.getType()->isPointerTy() &&
        V.getType()->getPointerElementType()->isFunctionTy() &&
        !A.isModulePass())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
      if (!MemAA.isAssumedReadOnly())
        return indicatePessimisticFixpoint();
    }

    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // We can only propagate thread independent values through callbacks.
      // This is different to direct/indirect call sites because for them we
      // know the thread executing the caller and callee is the same. For
      // callbacks this is not guaranteed, thus a thread dependent value could
      // be different for the caller and callee, making it invalid to
      // propagate.
      Value &ArgOp = ACSArgPos.getAssociatedValue();
      if (ACS.isCallbackCall())
        if (auto *C = dyn_cast<Constant>(&ArgOp))
          if (C->isThreadDependent())
            return false;
      return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(PredForCallSite, *this, true,
                                AllCallSitesKnown))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
ChangeStatus::UNCHANGED 4642 : ChangeStatus ::CHANGED; 4643 } 4644 4645 /// See AbstractAttribute::trackStatistics() 4646 void trackStatistics() const override { 4647 STATS_DECLTRACK_ARG_ATTR(value_simplify) 4648 } 4649 }; 4650 4651 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 4652 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 4653 : AAValueSimplifyImpl(IRP, A) {} 4654 4655 /// See AbstractAttribute::updateImpl(...). 4656 ChangeStatus updateImpl(Attributor &A) override { 4657 bool HasValueBefore = SimplifiedAssociatedValue.hasValue(); 4658 4659 auto PredForReturned = [&](Value &V) { 4660 return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue); 4661 }; 4662 4663 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 4664 if (!askSimplifiedValueForOtherAAs(A)) 4665 return indicatePessimisticFixpoint(); 4666 4667 // If a candicate was found in this update, return CHANGED. 4668 return HasValueBefore == SimplifiedAssociatedValue.hasValue() 4669 ? ChangeStatus::UNCHANGED 4670 : ChangeStatus ::CHANGED; 4671 } 4672 4673 ChangeStatus manifest(Attributor &A) override { 4674 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4675 4676 if (SimplifiedAssociatedValue.hasValue() && 4677 !SimplifiedAssociatedValue.getValue()) 4678 return Changed; 4679 4680 Value &V = getAssociatedValue(); 4681 auto *C = SimplifiedAssociatedValue.hasValue() 4682 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4683 : UndefValue::get(V.getType()); 4684 if (C) { 4685 auto PredForReturned = 4686 [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 4687 // We can replace the AssociatedValue with the constant. 4688 if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V)) 4689 return true; 4690 4691 for (ReturnInst *RI : RetInsts) { 4692 if (RI->getFunction() != getAnchorScope()) 4693 continue; 4694 auto *RC = C; 4695 if (RC->getType() != RI->getReturnValue()->getType()) 4696 RC = ConstantExpr::getBitCast(RC, 4697 RI->getReturnValue()->getType()); 4698 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC 4699 << " in " << *RI << " :: " << *this << "\n"); 4700 if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC)) 4701 Changed = ChangeStatus::CHANGED; 4702 } 4703 return true; 4704 }; 4705 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 4706 } 4707 4708 return Changed | AAValueSimplify::manifest(A); 4709 } 4710 4711 /// See AbstractAttribute::trackStatistics() 4712 void trackStatistics() const override { 4713 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 4714 } 4715 }; 4716 4717 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 4718 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 4719 : AAValueSimplifyImpl(IRP, A) {} 4720 4721 /// See AbstractAttribute::initialize(...). 4722 void initialize(Attributor &A) override { 4723 // FIXME: This might have exposed a SCC iterator update bug in the old PM. 4724 // Needs investigation. 4725 // AAValueSimplifyImpl::initialize(A); 4726 Value &V = getAnchorValue(); 4727 4728 // TODO: add other stuffs 4729 if (isa<Constant>(V)) 4730 indicatePessimisticFixpoint(); 4731 } 4732 4733 /// Check if \p ICmp is an equality comparison (==/!=) with at least one 4734 /// nullptr. If so, try to simplify it using AANonNull on the other operand. 4735 /// Return true if successful, in that case SimplifiedAssociatedValue will be 4736 /// updated and \p Changed is set appropriately. 
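  ///
  /// For illustration, a hedged sketch of the deduction this enables (the IR
  /// names below are hypothetical):
  ///   %cmp = icmp eq i8* %p, null
  /// folds to `i1 false` once AANonNull reports %p as (assumed) non-null, and
  /// the ICMP_NE form folds to `i1 true` accordingly.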
  bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
                              ChangeStatus &Changed) {
    if (!ICmp)
      return false;
    if (!ICmp->isEquality())
      return false;

    // This is a comparison with == or !=. We check for nullptr now.
    bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
    bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
    if (!Op0IsNull && !Op1IsNull)
      return false;

    LLVMContext &Ctx = ICmp->getContext();
    // Check for `nullptr ==/!= nullptr` first:
    if (Op0IsNull && Op1IsNull) {
      Value *NewVal = ConstantInt::get(
          Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
      assert(!SimplifiedAssociatedValue.hasValue() &&
             "Did not expect non-fixed value for constant comparison");
      SimplifiedAssociatedValue = NewVal;
      indicateOptimisticFixpoint();
      Changed = ChangeStatus::CHANGED;
      return true;
    }

    // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
    // non-nullptr operand and if we assume it's non-null we can conclude the
    // result of the comparison.
    assert((Op0IsNull || Op1IsNull) &&
           "Expected nullptr versus non-nullptr comparison at this point");

    // The index is the operand that we assume is not null.
    unsigned PtrIdx = Op0IsNull;
    auto &PtrNonNullAA = A.getAAFor<AANonNull>(
        *this, IRPosition::value(*ICmp->getOperand(PtrIdx)));
    if (!PtrNonNullAA.isAssumedNonNull())
      return false;

    // The new value depends on the predicate, true for != and false for ==.
    Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
                                     ICmp->getPredicate() == CmpInst::ICMP_NE);

    assert((!SimplifiedAssociatedValue.hasValue() ||
            SimplifiedAssociatedValue == NewVal) &&
           "Did not expect to change value for zero-comparison");

    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
    SimplifiedAssociatedValue = NewVal;

    if (PtrNonNullAA.isKnownNonNull())
      indicateOptimisticFixpoint();

    Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool HasValueBefore = SimplifiedAssociatedValue.hasValue();

    ChangeStatus Changed;
    if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
                               Changed))
      return Changed;

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
                            bool Stripped) -> bool {
      auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.

        LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
                          << "\n");
        return false;
      }
      return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
    };

    bool Dummy = false;
    if (!genericValueTraversal<AAValueSimplify, bool>(
            A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
            /* UseValueSimplify */ false))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.

    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ?
ChangeStatus::UNCHANGED 4827 : ChangeStatus ::CHANGED; 4828 } 4829 4830 /// See AbstractAttribute::trackStatistics() 4831 void trackStatistics() const override { 4832 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 4833 } 4834 }; 4835 4836 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 4837 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 4838 : AAValueSimplifyImpl(IRP, A) {} 4839 4840 /// See AbstractAttribute::initialize(...). 4841 void initialize(Attributor &A) override { 4842 SimplifiedAssociatedValue = &getAnchorValue(); 4843 indicateOptimisticFixpoint(); 4844 } 4845 /// See AbstractAttribute::initialize(...). 4846 ChangeStatus updateImpl(Attributor &A) override { 4847 llvm_unreachable( 4848 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 4849 } 4850 /// See AbstractAttribute::trackStatistics() 4851 void trackStatistics() const override { 4852 STATS_DECLTRACK_FN_ATTR(value_simplify) 4853 } 4854 }; 4855 4856 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 4857 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 4858 : AAValueSimplifyFunction(IRP, A) {} 4859 /// See AbstractAttribute::trackStatistics() 4860 void trackStatistics() const override { 4861 STATS_DECLTRACK_CS_ATTR(value_simplify) 4862 } 4863 }; 4864 4865 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned { 4866 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 4867 : AAValueSimplifyReturned(IRP, A) {} 4868 4869 /// See AbstractAttribute::manifest(...). 4870 ChangeStatus manifest(Attributor &A) override { 4871 return AAValueSimplifyImpl::manifest(A); 4872 } 4873 4874 void trackStatistics() const override { 4875 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 4876 } 4877 }; 4878 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 4879 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 4880 : AAValueSimplifyFloating(IRP, A) {} 4881 4882 /// See AbstractAttribute::manifest(...). 4883 ChangeStatus manifest(Attributor &A) override { 4884 ChangeStatus Changed = ChangeStatus::UNCHANGED; 4885 4886 if (SimplifiedAssociatedValue.hasValue() && 4887 !SimplifiedAssociatedValue.getValue()) 4888 return Changed; 4889 4890 Value &V = getAssociatedValue(); 4891 auto *C = SimplifiedAssociatedValue.hasValue() 4892 ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue()) 4893 : UndefValue::get(V.getType()); 4894 if (C) { 4895 Use &U = cast<CallBase>(&getAnchorValue())->getArgOperandUse(getArgNo()); 4896 // We can replace the AssociatedValue with the constant. 
4897 if (&V != C && V.getType() == C->getType()) { 4898 if (A.changeUseAfterManifest(U, *C)) 4899 Changed = ChangeStatus::CHANGED; 4900 } 4901 } 4902 4903 return Changed | AAValueSimplify::manifest(A); 4904 } 4905 4906 void trackStatistics() const override { 4907 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 4908 } 4909 }; 4910 4911 /// ----------------------- Heap-To-Stack Conversion --------------------------- 4912 struct AAHeapToStackImpl : public AAHeapToStack { 4913 AAHeapToStackImpl(const IRPosition &IRP, Attributor &A) 4914 : AAHeapToStack(IRP, A) {} 4915 4916 const std::string getAsStr() const override { 4917 return "[H2S] Mallocs: " + std::to_string(MallocCalls.size()); 4918 } 4919 4920 ChangeStatus manifest(Attributor &A) override { 4921 assert(getState().isValidState() && 4922 "Attempted to manifest an invalid state!"); 4923 4924 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 4925 Function *F = getAnchorScope(); 4926 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 4927 4928 for (Instruction *MallocCall : MallocCalls) { 4929 // This malloc cannot be replaced. 4930 if (BadMallocCalls.count(MallocCall)) 4931 continue; 4932 4933 for (Instruction *FreeCall : FreesForMalloc[MallocCall]) { 4934 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 4935 A.deleteAfterManifest(*FreeCall); 4936 HasChanged = ChangeStatus::CHANGED; 4937 } 4938 4939 LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall 4940 << "\n"); 4941 4942 Align Alignment; 4943 Constant *Size; 4944 if (isCallocLikeFn(MallocCall, TLI)) { 4945 auto *Num = cast<ConstantInt>(MallocCall->getOperand(0)); 4946 auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1)); 4947 APInt TotalSize = SizeT->getValue() * Num->getValue(); 4948 Size = 4949 ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize); 4950 } else if (isAlignedAllocLikeFn(MallocCall, TLI)) { 4951 Size = cast<ConstantInt>(MallocCall->getOperand(1)); 4952 Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0)) 4953 ->getValue() 4954 .getZExtValue()) 4955 .valueOrOne(); 4956 } else { 4957 Size = cast<ConstantInt>(MallocCall->getOperand(0)); 4958 } 4959 4960 unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace(); 4961 Instruction *AI = 4962 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 4963 "", MallocCall->getNextNode()); 4964 4965 if (AI->getType() != MallocCall->getType()) 4966 AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc", 4967 AI->getNextNode()); 4968 4969 A.changeValueAfterManifest(*MallocCall, *AI); 4970 4971 if (auto *II = dyn_cast<InvokeInst>(MallocCall)) { 4972 auto *NBB = II->getNormalDest(); 4973 BranchInst::Create(NBB, MallocCall->getParent()); 4974 A.deleteAfterManifest(*MallocCall); 4975 } else { 4976 A.deleteAfterManifest(*MallocCall); 4977 } 4978 4979 // Zero out the allocated memory if it was a calloc. 
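      // For illustration, a hedged sketch (not the exact textual IR): for a
      // converted `calloc(N, S)` the code below emits something along the
      // lines of
      //   %calloc_bc = bitcast <alloca> to i8*
      //   call void @llvm.memset.p0i8.i64(i8* %calloc_bc, i8 0, i64 N*S,
      //                                   i1 false)
      // so the stack copy keeps calloc's zero-initialization guarantee.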
4980 if (isCallocLikeFn(MallocCall, TLI)) { 4981 auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc", 4982 AI->getNextNode()); 4983 Value *Ops[] = { 4984 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 4985 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 4986 4987 Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()}; 4988 Module *M = F->getParent(); 4989 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 4990 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 4991 } 4992 HasChanged = ChangeStatus::CHANGED; 4993 } 4994 4995 return HasChanged; 4996 } 4997 4998 /// Collection of all malloc calls in a function. 4999 SmallSetVector<Instruction *, 4> MallocCalls; 5000 5001 /// Collection of malloc calls that cannot be converted. 5002 DenseSet<const Instruction *> BadMallocCalls; 5003 5004 /// A map for each malloc call to the set of associated free calls. 5005 DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc; 5006 5007 ChangeStatus updateImpl(Attributor &A) override; 5008 }; 5009 5010 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) { 5011 const Function *F = getAnchorScope(); 5012 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5013 5014 MustBeExecutedContextExplorer &Explorer = 5015 A.getInfoCache().getMustBeExecutedContextExplorer(); 5016 5017 auto FreeCheck = [&](Instruction &I) { 5018 const auto &Frees = FreesForMalloc.lookup(&I); 5019 if (Frees.size() != 1) 5020 return false; 5021 Instruction *UniqueFree = *Frees.begin(); 5022 return Explorer.findInContextOf(UniqueFree, I.getNextNode()); 5023 }; 5024 5025 auto UsesCheck = [&](Instruction &I) { 5026 bool ValidUsesOnly = true; 5027 bool MustUse = true; 5028 auto Pred = [&](const Use &U, bool &Follow) -> bool { 5029 Instruction *UserI = cast<Instruction>(U.getUser()); 5030 if (isa<LoadInst>(UserI)) 5031 return true; 5032 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 5033 if (SI->getValueOperand() == U.get()) { 5034 LLVM_DEBUG(dbgs() 5035 << "[H2S] escaping store to memory: " << *UserI << "\n"); 5036 ValidUsesOnly = false; 5037 } else { 5038 // A store into the malloc'ed memory is fine. 5039 } 5040 return true; 5041 } 5042 if (auto *CB = dyn_cast<CallBase>(UserI)) { 5043 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 5044 return true; 5045 // Record malloc. 5046 if (isFreeCall(UserI, TLI)) { 5047 if (MustUse) { 5048 FreesForMalloc[&I].insert(UserI); 5049 } else { 5050 LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: " 5051 << *UserI << "\n"); 5052 ValidUsesOnly = false; 5053 } 5054 return true; 5055 } 5056 5057 unsigned ArgNo = CB->getArgOperandNo(&U); 5058 5059 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 5060 *this, IRPosition::callsite_argument(*CB, ArgNo)); 5061 5062 // If a callsite argument use is nofree, we are fine. 5063 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 5064 *this, IRPosition::callsite_argument(*CB, ArgNo)); 5065 5066 if (!NoCaptureAA.isAssumedNoCapture() || 5067 !ArgNoFreeAA.isAssumedNoFree()) { 5068 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 5069 ValidUsesOnly = false; 5070 } 5071 return true; 5072 } 5073 5074 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 5075 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 5076 MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI)); 5077 Follow = true; 5078 return true; 5079 } 5080 // Unknown user for which we can not track uses further (in a way that 5081 // makes sense). 
5082 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 5083 ValidUsesOnly = false; 5084 return true; 5085 }; 5086 A.checkForAllUses(Pred, *this, I); 5087 return ValidUsesOnly; 5088 }; 5089 5090 auto MallocCallocCheck = [&](Instruction &I) { 5091 if (BadMallocCalls.count(&I)) 5092 return true; 5093 5094 bool IsMalloc = isMallocLikeFn(&I, TLI); 5095 bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI); 5096 bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI); 5097 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) { 5098 BadMallocCalls.insert(&I); 5099 return true; 5100 } 5101 5102 if (IsMalloc) { 5103 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0))) 5104 if (Size->getValue().ule(MaxHeapToStackSize)) 5105 if (UsesCheck(I) || FreeCheck(I)) { 5106 MallocCalls.insert(&I); 5107 return true; 5108 } 5109 } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) { 5110 // Only if the alignment and sizes are constant. 5111 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 5112 if (Size->getValue().ule(MaxHeapToStackSize)) 5113 if (UsesCheck(I) || FreeCheck(I)) { 5114 MallocCalls.insert(&I); 5115 return true; 5116 } 5117 } else if (IsCalloc) { 5118 bool Overflow = false; 5119 if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0))) 5120 if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1))) 5121 if ((Size->getValue().umul_ov(Num->getValue(), Overflow)) 5122 .ule(MaxHeapToStackSize)) 5123 if (!Overflow && (UsesCheck(I) || FreeCheck(I))) { 5124 MallocCalls.insert(&I); 5125 return true; 5126 } 5127 } 5128 5129 BadMallocCalls.insert(&I); 5130 return true; 5131 }; 5132 5133 size_t NumBadMallocs = BadMallocCalls.size(); 5134 5135 A.checkForAllCallLikeInstructions(MallocCallocCheck, *this); 5136 5137 if (NumBadMallocs != BadMallocCalls.size()) 5138 return ChangeStatus::CHANGED; 5139 5140 return ChangeStatus::UNCHANGED; 5141 } 5142 5143 struct AAHeapToStackFunction final : public AAHeapToStackImpl { 5144 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5145 : AAHeapToStackImpl(IRP, A) {} 5146 5147 /// See AbstractAttribute::trackStatistics(). 5148 void trackStatistics() const override { 5149 STATS_DECL( 5150 MallocCalls, Function, 5151 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5152 for (auto *C : MallocCalls) 5153 if (!BadMallocCalls.count(C)) 5154 ++BUILD_STAT_NAME(MallocCalls, Function); 5155 } 5156 }; 5157 5158 /// ----------------------- Privatizable Pointers ------------------------------ 5159 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 5160 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 5161 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 5162 5163 ChangeStatus indicatePessimisticFixpoint() override { 5164 AAPrivatizablePtr::indicatePessimisticFixpoint(); 5165 PrivatizableType = nullptr; 5166 return ChangeStatus::CHANGED; 5167 } 5168 5169 /// Identify the type we can chose for a private copy of the underlying 5170 /// argument. None means it is not clear yet, nullptr means there is none. 5171 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 5172 5173 /// Return a privatizable type that encloses both T0 and T1. 5174 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
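  /// A small example of the meet this implements: llvm::None acts as the
  /// neutral element, equal types combine to themselves, and conflicting
  /// types combine to nullptr (i.e. not privatizable), e.g.
  ///   combineTypes(None, i32) -> i32
  ///   combineTypes(i32,  i32) -> i32
  ///   combineTypes(i32,  i64) -> nullptr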
5175 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 5176 if (!T0.hasValue()) 5177 return T1; 5178 if (!T1.hasValue()) 5179 return T0; 5180 if (T0 == T1) 5181 return T0; 5182 return nullptr; 5183 } 5184 5185 Optional<Type *> getPrivatizableType() const override { 5186 return PrivatizableType; 5187 } 5188 5189 const std::string getAsStr() const override { 5190 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 5191 } 5192 5193 protected: 5194 Optional<Type *> PrivatizableType; 5195 }; 5196 5197 // TODO: Do this for call site arguments (probably also other values) as well. 5198 5199 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 5200 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 5201 : AAPrivatizablePtrImpl(IRP, A) {} 5202 5203 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5204 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5205 // If this is a byval argument and we know all the call sites (so we can 5206 // rewrite them), there is no need to check them explicitly. 5207 bool AllCallSitesKnown; 5208 if (getIRPosition().hasAttr(Attribute::ByVal) && 5209 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 5210 true, AllCallSitesKnown)) 5211 return getAssociatedValue().getType()->getPointerElementType(); 5212 5213 Optional<Type *> Ty; 5214 unsigned ArgNo = getIRPosition().getArgNo(); 5215 5216 // Make sure the associated call site argument has the same type at all call 5217 // sites and it is an allocation we know is safe to privatize, for now that 5218 // means we only allow alloca instructions. 5219 // TODO: We can additionally analyze the accesses in the callee to create 5220 // the type from that information instead. That is a little more 5221 // involved and will be done in a follow up patch. 5222 auto CallSiteCheck = [&](AbstractCallSite ACS) { 5223 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 5224 // Check if a coresponding argument was found or if it is one not 5225 // associated (which can happen for callback calls). 5226 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 5227 return false; 5228 5229 // Check that all call sites agree on a type. 5230 auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos); 5231 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 5232 5233 LLVM_DEBUG({ 5234 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 5235 if (CSTy.hasValue() && CSTy.getValue()) 5236 CSTy.getValue()->print(dbgs()); 5237 else if (CSTy.hasValue()) 5238 dbgs() << "<nullptr>"; 5239 else 5240 dbgs() << "<none>"; 5241 }); 5242 5243 Ty = combineTypes(Ty, CSTy); 5244 5245 LLVM_DEBUG({ 5246 dbgs() << " : New Type: "; 5247 if (Ty.hasValue() && Ty.getValue()) 5248 Ty.getValue()->print(dbgs()); 5249 else if (Ty.hasValue()) 5250 dbgs() << "<nullptr>"; 5251 else 5252 dbgs() << "<none>"; 5253 dbgs() << "\n"; 5254 }); 5255 5256 return !Ty.hasValue() || Ty.getValue(); 5257 }; 5258 5259 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown)) 5260 return nullptr; 5261 return Ty; 5262 } 5263 5264 /// See AbstractAttribute::updateImpl(...). 
5265 ChangeStatus updateImpl(Attributor &A) override { 5266 PrivatizableType = identifyPrivatizableType(A); 5267 if (!PrivatizableType.hasValue()) 5268 return ChangeStatus::UNCHANGED; 5269 if (!PrivatizableType.getValue()) 5270 return indicatePessimisticFixpoint(); 5271 5272 // The dependence is optional so we don't give up once we give up on the 5273 // alignment. 5274 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 5275 /* TrackDependence */ true, DepClassTy::OPTIONAL); 5276 5277 // Avoid arguments with padding for now. 5278 if (!getIRPosition().hasAttr(Attribute::ByVal) && 5279 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 5280 A.getInfoCache().getDL())) { 5281 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 5282 return indicatePessimisticFixpoint(); 5283 } 5284 5285 // Verify callee and caller agree on how the promoted argument would be 5286 // passed. 5287 // TODO: The use of the ArgumentPromotion interface here is ugly, we need a 5288 // specialized form of TargetTransformInfo::areFunctionArgsABICompatible 5289 // which doesn't require the arguments ArgumentPromotion wanted to pass. 5290 Function &Fn = *getIRPosition().getAnchorScope(); 5291 SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy; 5292 ArgsToPromote.insert(getAssociatedArgument()); 5293 const auto *TTI = 5294 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 5295 if (!TTI || 5296 !ArgumentPromotionPass::areFunctionArgsABICompatible( 5297 Fn, *TTI, ArgsToPromote, Dummy) || 5298 ArgsToPromote.empty()) { 5299 LLVM_DEBUG( 5300 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 5301 << Fn.getName() << "\n"); 5302 return indicatePessimisticFixpoint(); 5303 } 5304 5305 // Collect the types that will replace the privatizable type in the function 5306 // signature. 5307 SmallVector<Type *, 16> ReplacementTypes; 5308 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5309 5310 // Register a rewrite of the argument. 5311 Argument *Arg = getAssociatedArgument(); 5312 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 5313 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 5314 return indicatePessimisticFixpoint(); 5315 } 5316 5317 unsigned ArgNo = Arg->getArgNo(); 5318 5319 // Helper to check if for the given call site the associated argument is 5320 // passed to a callback where the privatization would be different. 
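    // As a hedged example (names hypothetical): for a broker call such as
    //   call void @broker(void (i32*)* @callback, i32* %arg)  ; with !callback
    // where %arg is forwarded to @callback, the corresponding @callback
    // argument must agree on the privatizable type or we give up below.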
5321 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { 5322 SmallVector<const Use *, 4> CallbackUses; 5323 AbstractCallSite::getCallbackUses(CB, CallbackUses); 5324 for (const Use *U : CallbackUses) { 5325 AbstractCallSite CBACS(U); 5326 assert(CBACS && CBACS.isCallbackCall()); 5327 for (Argument &CBArg : CBACS.getCalledFunction()->args()) { 5328 int CBArgNo = CBACS.getCallArgOperandNo(CBArg); 5329 5330 LLVM_DEBUG({ 5331 dbgs() 5332 << "[AAPrivatizablePtr] Argument " << *Arg 5333 << "check if can be privatized in the context of its parent (" 5334 << Arg->getParent()->getName() 5335 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5336 "callback (" 5337 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5338 << ")\n[AAPrivatizablePtr] " << CBArg << " : " 5339 << CBACS.getCallArgOperand(CBArg) << " vs " 5340 << CB.getArgOperand(ArgNo) << "\n" 5341 << "[AAPrivatizablePtr] " << CBArg << " : " 5342 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; 5343 }); 5344 5345 if (CBArgNo != int(ArgNo)) 5346 continue; 5347 const auto &CBArgPrivAA = 5348 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg)); 5349 if (CBArgPrivAA.isValidState()) { 5350 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); 5351 if (!CBArgPrivTy.hasValue()) 5352 continue; 5353 if (CBArgPrivTy.getValue() == PrivatizableType) 5354 continue; 5355 } 5356 5357 LLVM_DEBUG({ 5358 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5359 << " cannot be privatized in the context of its parent (" 5360 << Arg->getParent()->getName() 5361 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5362 "callback (" 5363 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 5364 << ").\n[AAPrivatizablePtr] for which the argument " 5365 "privatization is not compatible.\n"; 5366 }); 5367 return false; 5368 } 5369 } 5370 return true; 5371 }; 5372 5373 // Helper to check if for the given call site the associated argument is 5374 // passed to a direct call where the privatization would be different. 
5375 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) { 5376 CallBase *DC = cast<CallBase>(ACS.getInstruction()); 5377 int DCArgNo = ACS.getCallArgOperandNo(ArgNo); 5378 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() && 5379 "Expected a direct call operand for callback call operand"); 5380 5381 LLVM_DEBUG({ 5382 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5383 << " check if be privatized in the context of its parent (" 5384 << Arg->getParent()->getName() 5385 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5386 "direct call of (" 5387 << DCArgNo << "@" << DC->getCalledFunction()->getName() 5388 << ").\n"; 5389 }); 5390 5391 Function *DCCallee = DC->getCalledFunction(); 5392 if (unsigned(DCArgNo) < DCCallee->arg_size()) { 5393 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>( 5394 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo))); 5395 if (DCArgPrivAA.isValidState()) { 5396 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType(); 5397 if (!DCArgPrivTy.hasValue()) 5398 return true; 5399 if (DCArgPrivTy.getValue() == PrivatizableType) 5400 return true; 5401 } 5402 } 5403 5404 LLVM_DEBUG({ 5405 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 5406 << " cannot be privatized in the context of its parent (" 5407 << Arg->getParent()->getName() 5408 << ")\n[AAPrivatizablePtr] because it is an argument in a " 5409 "direct call of (" 5410 << ACS.getInstruction()->getCalledFunction()->getName() 5411 << ").\n[AAPrivatizablePtr] for which the argument " 5412 "privatization is not compatible.\n"; 5413 }); 5414 return false; 5415 }; 5416 5417 // Helper to check if the associated argument is used at the given abstract 5418 // call site in a way that is incompatible with the privatization assumed 5419 // here. 5420 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) { 5421 if (ACS.isDirectCall()) 5422 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction()); 5423 if (ACS.isCallbackCall()) 5424 return IsCompatiblePrivArgOfDirectCS(ACS); 5425 return false; 5426 }; 5427 5428 bool AllCallSitesKnown; 5429 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true, 5430 AllCallSitesKnown)) 5431 return indicatePessimisticFixpoint(); 5432 5433 return ChangeStatus::UNCHANGED; 5434 } 5435 5436 /// Given a type to private \p PrivType, collect the constituates (which are 5437 /// used) in \p ReplacementTypes. 5438 static void 5439 identifyReplacementTypes(Type *PrivType, 5440 SmallVectorImpl<Type *> &ReplacementTypes) { 5441 // TODO: For now we expand the privatization type to the fullest which can 5442 // lead to dead arguments that need to be removed later. 5443 assert(PrivType && "Expected privatizable type!"); 5444 5445 // Traverse the type, extract constituate types on the outermost level. 5446 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5447 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) 5448 ReplacementTypes.push_back(PrivStructType->getElementType(u)); 5449 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5450 ReplacementTypes.append(PrivArrayType->getNumElements(), 5451 PrivArrayType->getElementType()); 5452 } else { 5453 ReplacementTypes.push_back(PrivType); 5454 } 5455 } 5456 5457 /// Initialize \p Base according to the type \p PrivType at position \p IP. 5458 /// The values needed are taken from the arguments of \p F starting at 5459 /// position \p ArgNo. 
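  ///
  /// For illustration, a hedged sketch with a hypothetical type: privatizing
  /// an argument of type { i32, i64 } expands it into two scalar arguments,
  /// and this helper then emits at \p IP roughly
  ///   store i32 %arg.0, i32* <gep into Base at element offset 0>
  ///   store i64 %arg.1, i64* <gep into Base at the layout offset of field 1>
  /// so the private copy starts out with the values the caller passed.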
5460 static void createInitialization(Type *PrivType, Value &Base, Function &F, 5461 unsigned ArgNo, Instruction &IP) { 5462 assert(PrivType && "Expected privatizable type!"); 5463 5464 IRBuilder<NoFolder> IRB(&IP); 5465 const DataLayout &DL = F.getParent()->getDataLayout(); 5466 5467 // Traverse the type, build GEPs and stores. 5468 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5469 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5470 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5471 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 5472 Value *Ptr = constructPointer( 5473 PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL); 5474 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5475 } 5476 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5477 Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo(); 5478 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy); 5479 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5480 Value *Ptr = 5481 constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL); 5482 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 5483 } 5484 } else { 5485 new StoreInst(F.getArg(ArgNo), &Base, &IP); 5486 } 5487 } 5488 5489 /// Extract values from \p Base according to the type \p PrivType at the 5490 /// call position \p ACS. The values are appended to \p ReplacementValues. 5491 void createReplacementValues(Align Alignment, Type *PrivType, 5492 AbstractCallSite ACS, Value *Base, 5493 SmallVectorImpl<Value *> &ReplacementValues) { 5494 assert(Base && "Expected base value!"); 5495 assert(PrivType && "Expected privatizable type!"); 5496 Instruction *IP = ACS.getInstruction(); 5497 5498 IRBuilder<NoFolder> IRB(IP); 5499 const DataLayout &DL = IP->getModule()->getDataLayout(); 5500 5501 if (Base->getType()->getPointerElementType() != PrivType) 5502 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 5503 "", ACS.getInstruction()); 5504 5505 // Traverse the type, build GEPs and loads. 5506 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 5507 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 5508 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 5509 Type *PointeeTy = PrivStructType->getElementType(u); 5510 Value *Ptr = 5511 constructPointer(PointeeTy->getPointerTo(), Base, 5512 PrivStructLayout->getElementOffset(u), IRB, DL); 5513 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 5514 L->setAlignment(Alignment); 5515 ReplacementValues.push_back(L); 5516 } 5517 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 5518 Type *PointeeTy = PrivArrayType->getElementType(); 5519 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 5520 Type *PointeePtrTy = PointeeTy->getPointerTo(); 5521 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 5522 Value *Ptr = 5523 constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL); 5524 LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP); 5525 L->setAlignment(Alignment); 5526 ReplacementValues.push_back(L); 5527 } 5528 } else { 5529 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 5530 L->setAlignment(Alignment); 5531 ReplacementValues.push_back(L); 5532 } 5533 } 5534 5535 /// See AbstractAttribute::manifest(...) 
5536 ChangeStatus manifest(Attributor &A) override { 5537 if (!PrivatizableType.hasValue()) 5538 return ChangeStatus::UNCHANGED; 5539 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 5540 5541 // Collect all tail calls in the function as we cannot allow new allocas to 5542 // escape into tail recursion. 5543 // TODO: Be smarter about new allocas escaping into tail calls. 5544 SmallVector<CallInst *, 16> TailCalls; 5545 if (!A.checkForAllInstructions( 5546 [&](Instruction &I) { 5547 CallInst &CI = cast<CallInst>(I); 5548 if (CI.isTailCall()) 5549 TailCalls.push_back(&CI); 5550 return true; 5551 }, 5552 *this, {Instruction::Call})) 5553 return ChangeStatus::UNCHANGED; 5554 5555 Argument *Arg = getAssociatedArgument(); 5556 // Query AAAlign attribute for alignment of associated argument to 5557 // determine the best alignment of loads. 5558 const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg)); 5559 5560 // Callback to repair the associated function. A new alloca is placed at the 5561 // beginning and initialized with the values passed through arguments. The 5562 // new alloca replaces the use of the old pointer argument. 5563 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 5564 [=](const Attributor::ArgumentReplacementInfo &ARI, 5565 Function &ReplacementFn, Function::arg_iterator ArgIt) { 5566 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 5567 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 5568 auto *AI = new AllocaInst(PrivatizableType.getValue(), 0, 5569 Arg->getName() + ".priv", IP); 5570 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 5571 ArgIt->getArgNo(), *IP); 5572 Arg->replaceAllUsesWith(AI); 5573 5574 for (CallInst *CI : TailCalls) 5575 CI->setTailCall(false); 5576 }; 5577 5578 // Callback to repair a call site of the associated function. The elements 5579 // of the privatizable type are loaded prior to the call and passed to the 5580 // new function version. 5581 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 5582 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 5583 AbstractCallSite ACS, 5584 SmallVectorImpl<Value *> &NewArgOperands) { 5585 // When no alignment is specified for the load instruction, 5586 // natural alignment is assumed. 5587 createReplacementValues( 5588 assumeAligned(AlignAA.getAssumedAlign()), 5589 PrivatizableType.getValue(), ACS, 5590 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 5591 NewArgOperands); 5592 }; 5593 5594 // Collect the types that will replace the privatizable type in the function 5595 // signature. 5596 SmallVector<Type *, 16> ReplacementTypes; 5597 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 5598 5599 // Register a rewrite of the argument. 5600 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 5601 std::move(FnRepairCB), 5602 std::move(ACSRepairCB))) 5603 return ChangeStatus::CHANGED; 5604 return ChangeStatus::UNCHANGED; 5605 } 5606 5607 /// See AbstractAttribute::trackStatistics() 5608 void trackStatistics() const override { 5609 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 5610 } 5611 }; 5612 5613 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 5614 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 5615 : AAPrivatizablePtrImpl(IRP, A) {} 5616 5617 /// See AbstractAttribute::initialize(...). 5618 virtual void initialize(Attributor &A) override { 5619 // TODO: We can privatize more than arguments. 
5620 indicatePessimisticFixpoint(); 5621 } 5622 5623 ChangeStatus updateImpl(Attributor &A) override { 5624 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 5625 "updateImpl will not be called"); 5626 } 5627 5628 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 5629 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 5630 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 5631 if (!Obj) { 5632 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 5633 return nullptr; 5634 } 5635 5636 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 5637 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 5638 if (CI->isOne()) 5639 return Obj->getType()->getPointerElementType(); 5640 if (auto *Arg = dyn_cast<Argument>(Obj)) { 5641 auto &PrivArgAA = 5642 A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg)); 5643 if (PrivArgAA.isAssumedPrivatizablePtr()) 5644 return Obj->getType()->getPointerElementType(); 5645 } 5646 5647 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 5648 "alloca nor privatizable argument: " 5649 << *Obj << "!\n"); 5650 return nullptr; 5651 } 5652 5653 /// See AbstractAttribute::trackStatistics() 5654 void trackStatistics() const override { 5655 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 5656 } 5657 }; 5658 5659 struct AAPrivatizablePtrCallSiteArgument final 5660 : public AAPrivatizablePtrFloating { 5661 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 5662 : AAPrivatizablePtrFloating(IRP, A) {} 5663 5664 /// See AbstractAttribute::initialize(...). 5665 void initialize(Attributor &A) override { 5666 if (getIRPosition().hasAttr(Attribute::ByVal)) 5667 indicateOptimisticFixpoint(); 5668 } 5669 5670 /// See AbstractAttribute::updateImpl(...). 5671 ChangeStatus updateImpl(Attributor &A) override { 5672 PrivatizableType = identifyPrivatizableType(A); 5673 if (!PrivatizableType.hasValue()) 5674 return ChangeStatus::UNCHANGED; 5675 if (!PrivatizableType.getValue()) 5676 return indicatePessimisticFixpoint(); 5677 5678 const IRPosition &IRP = getIRPosition(); 5679 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP); 5680 if (!NoCaptureAA.isAssumedNoCapture()) { 5681 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 5682 return indicatePessimisticFixpoint(); 5683 } 5684 5685 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP); 5686 if (!NoAliasAA.isAssumedNoAlias()) { 5687 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 5688 return indicatePessimisticFixpoint(); 5689 } 5690 5691 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP); 5692 if (!MemBehaviorAA.isAssumedReadOnly()) { 5693 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 5694 return indicatePessimisticFixpoint(); 5695 } 5696 5697 return ChangeStatus::UNCHANGED; 5698 } 5699 5700 /// See AbstractAttribute::trackStatistics() 5701 void trackStatistics() const override { 5702 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 5703 } 5704 }; 5705 5706 struct AAPrivatizablePtrCallSiteReturned final 5707 : public AAPrivatizablePtrFloating { 5708 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 5709 : AAPrivatizablePtrFloating(IRP, A) {} 5710 5711 /// See AbstractAttribute::initialize(...). 5712 void initialize(Attributor &A) override { 5713 // TODO: We can privatize more than arguments. 
5714 indicatePessimisticFixpoint(); 5715 } 5716 5717 /// See AbstractAttribute::trackStatistics() 5718 void trackStatistics() const override { 5719 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 5720 } 5721 }; 5722 5723 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 5724 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 5725 : AAPrivatizablePtrFloating(IRP, A) {} 5726 5727 /// See AbstractAttribute::initialize(...). 5728 void initialize(Attributor &A) override { 5729 // TODO: We can privatize more than arguments. 5730 indicatePessimisticFixpoint(); 5731 } 5732 5733 /// See AbstractAttribute::trackStatistics() 5734 void trackStatistics() const override { 5735 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 5736 } 5737 }; 5738 5739 /// -------------------- Memory Behavior Attributes ---------------------------- 5740 /// Includes read-none, read-only, and write-only. 5741 /// ---------------------------------------------------------------------------- 5742 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 5743 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 5744 : AAMemoryBehavior(IRP, A) {} 5745 5746 /// See AbstractAttribute::initialize(...). 5747 void initialize(Attributor &A) override { 5748 intersectAssumedBits(BEST_STATE); 5749 getKnownStateFromValue(getIRPosition(), getState()); 5750 IRAttribute::initialize(A); 5751 } 5752 5753 /// Return the memory behavior information encoded in the IR for \p IRP. 5754 static void getKnownStateFromValue(const IRPosition &IRP, 5755 BitIntegerState &State, 5756 bool IgnoreSubsumingPositions = false) { 5757 SmallVector<Attribute, 2> Attrs; 5758 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 5759 for (const Attribute &Attr : Attrs) { 5760 switch (Attr.getKindAsEnum()) { 5761 case Attribute::ReadNone: 5762 State.addKnownBits(NO_ACCESSES); 5763 break; 5764 case Attribute::ReadOnly: 5765 State.addKnownBits(NO_WRITES); 5766 break; 5767 case Attribute::WriteOnly: 5768 State.addKnownBits(NO_READS); 5769 break; 5770 default: 5771 llvm_unreachable("Unexpected attribute!"); 5772 } 5773 } 5774 5775 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 5776 if (!I->mayReadFromMemory()) 5777 State.addKnownBits(NO_READS); 5778 if (!I->mayWriteToMemory()) 5779 State.addKnownBits(NO_WRITES); 5780 } 5781 } 5782 5783 /// See AbstractAttribute::getDeducedAttributes(...). 5784 void getDeducedAttributes(LLVMContext &Ctx, 5785 SmallVectorImpl<Attribute> &Attrs) const override { 5786 assert(Attrs.size() == 0); 5787 if (isAssumedReadNone()) 5788 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 5789 else if (isAssumedReadOnly()) 5790 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 5791 else if (isAssumedWriteOnly()) 5792 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 5793 assert(Attrs.size() <= 1); 5794 } 5795 5796 /// See AbstractAttribute::manifest(...). 5797 ChangeStatus manifest(Attributor &A) override { 5798 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 5799 return ChangeStatus::UNCHANGED; 5800 5801 const IRPosition &IRP = getIRPosition(); 5802 5803 // Check if we would improve the existing attributes first. 
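    // (For example, if the position already carries `readonly` and we again
    // deduced only `readonly`, there is nothing new to manifest.)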
5804 SmallVector<Attribute, 4> DeducedAttrs; 5805 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 5806 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 5807 return IRP.hasAttr(Attr.getKindAsEnum(), 5808 /* IgnoreSubsumingPositions */ true); 5809 })) 5810 return ChangeStatus::UNCHANGED; 5811 5812 // Clear existing attributes. 5813 IRP.removeAttrs(AttrKinds); 5814 5815 // Use the generic manifest method. 5816 return IRAttribute::manifest(A); 5817 } 5818 5819 /// See AbstractState::getAsStr(). 5820 const std::string getAsStr() const override { 5821 if (isAssumedReadNone()) 5822 return "readnone"; 5823 if (isAssumedReadOnly()) 5824 return "readonly"; 5825 if (isAssumedWriteOnly()) 5826 return "writeonly"; 5827 return "may-read/write"; 5828 } 5829 5830 /// The set of IR attributes AAMemoryBehavior deals with. 5831 static const Attribute::AttrKind AttrKinds[3]; 5832 }; 5833 5834 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 5835 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 5836 5837 /// Memory behavior attribute for a floating value. 5838 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 5839 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 5840 : AAMemoryBehaviorImpl(IRP, A) {} 5841 5842 /// See AbstractAttribute::initialize(...). 5843 void initialize(Attributor &A) override { 5844 AAMemoryBehaviorImpl::initialize(A); 5845 // Initialize the use vector with all direct uses of the associated value. 5846 for (const Use &U : getAssociatedValue().uses()) 5847 Uses.insert(&U); 5848 } 5849 5850 /// See AbstractAttribute::updateImpl(...). 5851 ChangeStatus updateImpl(Attributor &A) override; 5852 5853 /// See AbstractAttribute::trackStatistics() 5854 void trackStatistics() const override { 5855 if (isAssumedReadNone()) 5856 STATS_DECLTRACK_FLOATING_ATTR(readnone) 5857 else if (isAssumedReadOnly()) 5858 STATS_DECLTRACK_FLOATING_ATTR(readonly) 5859 else if (isAssumedWriteOnly()) 5860 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 5861 } 5862 5863 private: 5864 /// Return true if users of \p UserI might access the underlying 5865 /// variable/location described by \p U and should therefore be analyzed. 5866 bool followUsersOfUseIn(Attributor &A, const Use *U, 5867 const Instruction *UserI); 5868 5869 /// Update the state according to the effect of use \p U in \p UserI. 5870 void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI); 5871 5872 protected: 5873 /// Container for (transitive) uses of the associated argument. 5874 SetVector<const Use *> Uses; 5875 }; 5876 5877 /// Memory behavior attribute for function argument. 5878 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 5879 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 5880 : AAMemoryBehaviorFloating(IRP, A) {} 5881 5882 /// See AbstractAttribute::initialize(...). 5883 void initialize(Attributor &A) override { 5884 intersectAssumedBits(BEST_STATE); 5885 const IRPosition &IRP = getIRPosition(); 5886 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 5887 // can query it when we use has/getAttr. That would allow us to reuse the 5888 // initialize of the base class here. 5889 bool HasByVal = 5890 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 5891 getKnownStateFromValue(IRP, getState(), 5892 /* IgnoreSubsumingPositions */ HasByVal); 5893 5894 // Initialize the use vector with all direct uses of the associated value. 
5895 Argument *Arg = getAssociatedArgument(); 5896 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) { 5897 indicatePessimisticFixpoint(); 5898 } else { 5899 // Initialize the use vector with all direct uses of the associated value. 5900 for (const Use &U : Arg->uses()) 5901 Uses.insert(&U); 5902 } 5903 } 5904 5905 ChangeStatus manifest(Attributor &A) override { 5906 // TODO: Pointer arguments are not supported on vectors of pointers yet. 5907 if (!getAssociatedValue().getType()->isPointerTy()) 5908 return ChangeStatus::UNCHANGED; 5909 5910 // TODO: From readattrs.ll: "inalloca parameters are always 5911 // considered written" 5912 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 5913 removeKnownBits(NO_WRITES); 5914 removeAssumedBits(NO_WRITES); 5915 } 5916 return AAMemoryBehaviorFloating::manifest(A); 5917 } 5918 5919 /// See AbstractAttribute::trackStatistics() 5920 void trackStatistics() const override { 5921 if (isAssumedReadNone()) 5922 STATS_DECLTRACK_ARG_ATTR(readnone) 5923 else if (isAssumedReadOnly()) 5924 STATS_DECLTRACK_ARG_ATTR(readonly) 5925 else if (isAssumedWriteOnly()) 5926 STATS_DECLTRACK_ARG_ATTR(writeonly) 5927 } 5928 }; 5929 5930 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 5931 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 5932 : AAMemoryBehaviorArgument(IRP, A) {} 5933 5934 /// See AbstractAttribute::initialize(...). 5935 void initialize(Attributor &A) override { 5936 if (Argument *Arg = getAssociatedArgument()) { 5937 if (Arg->hasByValAttr()) { 5938 addKnownBits(NO_WRITES); 5939 removeKnownBits(NO_READS); 5940 removeAssumedBits(NO_READS); 5941 } 5942 } 5943 AAMemoryBehaviorArgument::initialize(A); 5944 } 5945 5946 /// See AbstractAttribute::updateImpl(...). 5947 ChangeStatus updateImpl(Attributor &A) override { 5948 // TODO: Once we have call site specific value information we can provide 5949 // call site specific liveness liveness information and then it makes 5950 // sense to specialize attributes for call sites arguments instead of 5951 // redirecting requests to the callee argument. 5952 Argument *Arg = getAssociatedArgument(); 5953 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5954 auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos); 5955 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 5956 } 5957 5958 /// See AbstractAttribute::trackStatistics() 5959 void trackStatistics() const override { 5960 if (isAssumedReadNone()) 5961 STATS_DECLTRACK_CSARG_ATTR(readnone) 5962 else if (isAssumedReadOnly()) 5963 STATS_DECLTRACK_CSARG_ATTR(readonly) 5964 else if (isAssumedWriteOnly()) 5965 STATS_DECLTRACK_CSARG_ATTR(writeonly) 5966 } 5967 }; 5968 5969 /// Memory behavior attribute for a call site return position. 5970 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { 5971 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) 5972 : AAMemoryBehaviorFloating(IRP, A) {} 5973 5974 /// See AbstractAttribute::manifest(...). 5975 ChangeStatus manifest(Attributor &A) override { 5976 // We do not annotate returned values. 5977 return ChangeStatus::UNCHANGED; 5978 } 5979 5980 /// See AbstractAttribute::trackStatistics() 5981 void trackStatistics() const override {} 5982 }; 5983 5984 /// An AA to represent the memory behavior function attributes. 
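/// As a hedged illustration: if no reachable read/write instruction in a
/// function may write to memory, the update below keeps NO_WRITES and the
/// function ends up annotated `readonly`; if it can neither read nor write,
/// it becomes `readnone` and the more specific memory attributes (e.g.
/// `argmemonly`) are dropped first in manifest.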
5985 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { 5986 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) 5987 : AAMemoryBehaviorImpl(IRP, A) {} 5988 5989 /// See AbstractAttribute::updateImpl(Attributor &A). 5990 virtual ChangeStatus updateImpl(Attributor &A) override; 5991 5992 /// See AbstractAttribute::manifest(...). 5993 ChangeStatus manifest(Attributor &A) override { 5994 Function &F = cast<Function>(getAnchorValue()); 5995 if (isAssumedReadNone()) { 5996 F.removeFnAttr(Attribute::ArgMemOnly); 5997 F.removeFnAttr(Attribute::InaccessibleMemOnly); 5998 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); 5999 } 6000 return AAMemoryBehaviorImpl::manifest(A); 6001 } 6002 6003 /// See AbstractAttribute::trackStatistics() 6004 void trackStatistics() const override { 6005 if (isAssumedReadNone()) 6006 STATS_DECLTRACK_FN_ATTR(readnone) 6007 else if (isAssumedReadOnly()) 6008 STATS_DECLTRACK_FN_ATTR(readonly) 6009 else if (isAssumedWriteOnly()) 6010 STATS_DECLTRACK_FN_ATTR(writeonly) 6011 } 6012 }; 6013 6014 /// AAMemoryBehavior attribute for call sites. 6015 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl { 6016 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A) 6017 : AAMemoryBehaviorImpl(IRP, A) {} 6018 6019 /// See AbstractAttribute::initialize(...). 6020 void initialize(Attributor &A) override { 6021 AAMemoryBehaviorImpl::initialize(A); 6022 Function *F = getAssociatedFunction(); 6023 if (!F || !A.isFunctionIPOAmendable(*F)) { 6024 indicatePessimisticFixpoint(); 6025 return; 6026 } 6027 } 6028 6029 /// See AbstractAttribute::updateImpl(...). 6030 ChangeStatus updateImpl(Attributor &A) override { 6031 // TODO: Once we have call site specific value information we can provide 6032 // call site specific liveness liveness information and then it makes 6033 // sense to specialize attributes for call sites arguments instead of 6034 // redirecting requests to the callee argument. 6035 Function *F = getAssociatedFunction(); 6036 const IRPosition &FnPos = IRPosition::function(*F); 6037 auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos); 6038 return clampStateAndIndicateChange(getState(), FnAA.getState()); 6039 } 6040 6041 /// See AbstractAttribute::trackStatistics() 6042 void trackStatistics() const override { 6043 if (isAssumedReadNone()) 6044 STATS_DECLTRACK_CS_ATTR(readnone) 6045 else if (isAssumedReadOnly()) 6046 STATS_DECLTRACK_CS_ATTR(readonly) 6047 else if (isAssumedWriteOnly()) 6048 STATS_DECLTRACK_CS_ATTR(writeonly) 6049 } 6050 }; 6051 6052 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) { 6053 6054 // The current assumed state used to determine a change. 6055 auto AssumedState = getAssumed(); 6056 6057 auto CheckRWInst = [&](Instruction &I) { 6058 // If the instruction has an own memory behavior state, use it to restrict 6059 // the local state. No further analysis is required as the other memory 6060 // state is as optimistic as it gets. 6061 if (const auto *CB = dyn_cast<CallBase>(&I)) { 6062 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6063 *this, IRPosition::callsite_function(*CB)); 6064 intersectAssumedBits(MemBehaviorAA.getAssumed()); 6065 return !isAtFixpoint(); 6066 } 6067 6068 // Remove access kind modifiers if necessary. 
6069 if (I.mayReadFromMemory()) 6070 removeAssumedBits(NO_READS); 6071 if (I.mayWriteToMemory()) 6072 removeAssumedBits(NO_WRITES); 6073 return !isAtFixpoint(); 6074 }; 6075 6076 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 6077 return indicatePessimisticFixpoint(); 6078 6079 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED 6080 : ChangeStatus::UNCHANGED; 6081 } 6082 6083 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) { 6084 6085 const IRPosition &IRP = getIRPosition(); 6086 const IRPosition &FnPos = IRPosition::function_scope(IRP); 6087 AAMemoryBehavior::StateType &S = getState(); 6088 6089 // First, check the function scope. We take the known information and we avoid 6090 // work if the assumed information implies the current assumed information for 6091 // this attribute. This is a valid for all but byval arguments. 6092 Argument *Arg = IRP.getAssociatedArgument(); 6093 AAMemoryBehavior::base_t FnMemAssumedState = 6094 AAMemoryBehavior::StateType::getWorstState(); 6095 if (!Arg || !Arg->hasByValAttr()) { 6096 const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>( 6097 *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6098 FnMemAssumedState = FnMemAA.getAssumed(); 6099 S.addKnownBits(FnMemAA.getKnown()); 6100 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed()) 6101 return ChangeStatus::UNCHANGED; 6102 } 6103 6104 // Make sure the value is not captured (except through "return"), if 6105 // it is, any information derived would be irrelevant anyway as we cannot 6106 // check the potential aliases introduced by the capture. However, no need 6107 // to fall back to anythign less optimistic than the function state. 6108 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 6109 *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6110 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 6111 S.intersectAssumedBits(FnMemAssumedState); 6112 return ChangeStatus::CHANGED; 6113 } 6114 6115 // The current assumed state used to determine a change. 6116 auto AssumedState = S.getAssumed(); 6117 6118 // Liveness information to exclude dead users. 6119 // TODO: Take the FnPos once we have call site specific liveness information. 6120 const auto &LivenessAA = A.getAAFor<AAIsDead>( 6121 *this, IRPosition::function(*IRP.getAssociatedFunction()), 6122 /* TrackDependence */ false); 6123 6124 // Visit and expand uses until all are analyzed or a fixpoint is reached. 6125 for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) { 6126 const Use *U = Uses[i]; 6127 Instruction *UserI = cast<Instruction>(U->getUser()); 6128 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI 6129 << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA)) 6130 << "]\n"); 6131 if (A.isAssumedDead(*U, this, &LivenessAA)) 6132 continue; 6133 6134 // Droppable users, e.g., llvm::assume does not actually perform any action. 6135 if (UserI->isDroppable()) 6136 continue; 6137 6138 // Check if the users of UserI should also be visited. 6139 if (followUsersOfUseIn(A, U, UserI)) 6140 for (const Use &UserIUse : UserI->uses()) 6141 Uses.insert(&UserIUse); 6142 6143 // If UserI might touch memory we analyze the use in detail. 6144 if (UserI->mayReadOrWriteMemory()) 6145 analyzeUseIn(A, U, UserI); 6146 } 6147 6148 return (AssumedState != getAssumed()) ? 
ChangeStatus::CHANGED 6149 : ChangeStatus::UNCHANGED; 6150 } 6151 6152 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U, 6153 const Instruction *UserI) { 6154 // The loaded value is unrelated to the pointer argument, no need to 6155 // follow the users of the load. 6156 if (isa<LoadInst>(UserI)) 6157 return false; 6158 6159 // By default we follow all uses assuming UserI might leak information on U, 6160 // we have special handling for call site operands though. 6161 const auto *CB = dyn_cast<CallBase>(UserI); 6162 if (!CB || !CB->isArgOperand(U)) 6163 return true; 6164 6165 // If the use is a call argument known not to be captured, the users of 6166 // the call do not need to be visited because they have to be unrelated to 6167 // the input. Note that this check is not trivial even though we disallow 6168 // general capturing of the underlying argument. The reason is that the 6169 // call might capture the argument "through return", which we allow and for which we 6170 // need to check call users. 6171 if (U->get()->getType()->isPointerTy()) { 6172 unsigned ArgNo = CB->getArgOperandNo(U); 6173 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 6174 *this, IRPosition::callsite_argument(*CB, ArgNo), 6175 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6176 return !ArgNoCaptureAA.isAssumedNoCapture(); 6177 } 6178 6179 return true; 6180 } 6181 6182 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U, 6183 const Instruction *UserI) { 6184 assert(UserI->mayReadOrWriteMemory()); 6185 6186 switch (UserI->getOpcode()) { 6187 default: 6188 // TODO: Handle all atomics and other side-effect operations we know of. 6189 break; 6190 case Instruction::Load: 6191 // Loads cause the NO_READS property to disappear. 6192 removeAssumedBits(NO_READS); 6193 return; 6194 6195 case Instruction::Store: 6196 // Stores cause the NO_WRITES property to disappear if the use is the 6197 // pointer operand. Note that we do assume that capturing was taken care of 6198 // somewhere else. 6199 if (cast<StoreInst>(UserI)->getPointerOperand() == U->get()) 6200 removeAssumedBits(NO_WRITES); 6201 return; 6202 6203 case Instruction::Call: 6204 case Instruction::CallBr: 6205 case Instruction::Invoke: { 6206 // For call sites we look at the argument memory behavior attribute (this 6207 // could be recursive!) in order to restrict our own state. 6208 const auto *CB = cast<CallBase>(UserI); 6209 6210 // Give up on operand bundles. 6211 if (CB->isBundleOperand(U)) { 6212 indicatePessimisticFixpoint(); 6213 return; 6214 } 6215 6216 // Calling a function does read the function pointer, maybe write it if the 6217 // function is self-modifying. 6218 if (CB->isCallee(U)) { 6219 removeAssumedBits(NO_READS); 6220 break; 6221 } 6222 6223 // Adjust the possible access behavior based on the information on the 6224 // argument. 6225 IRPosition Pos; 6226 if (U->get()->getType()->isPointerTy()) 6227 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U)); 6228 else 6229 Pos = IRPosition::callsite_function(*CB); 6230 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6231 *this, Pos, 6232 /* TrackDependence */ true, DepClassTy::OPTIONAL); 6233 // "assumed" has at most the same bits as the MemBehaviorAA assumed 6234 // and at least "known". 6235 intersectAssumedBits(MemBehaviorAA.getAssumed()); 6236 return; 6237 } 6238 }; 6239 6240 // Generally, look at the "may-properties" and adjust the assumed state if we 6241 // did not trigger special handling before.
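// E.g., an atomicrmw or cmpxchg user both reads and writes memory, so it clears both NO_READS and NO_WRITES here.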
6242 if (UserI->mayReadFromMemory()) 6243 removeAssumedBits(NO_READS); 6244 if (UserI->mayWriteToMemory()) 6245 removeAssumedBits(NO_WRITES); 6246 } 6247 6248 } // namespace 6249 6250 /// -------------------- Memory Locations Attributes --------------------------- 6251 /// Includes read-none, argmemonly, inaccessiblememonly, 6252 /// inaccessiblememorargmemonly 6253 /// ---------------------------------------------------------------------------- 6254 6255 std::string AAMemoryLocation::getMemoryLocationsAsStr( 6256 AAMemoryLocation::MemoryLocationsKind MLK) { 6257 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 6258 return "all memory"; 6259 if (MLK == AAMemoryLocation::NO_LOCATIONS) 6260 return "no memory"; 6261 std::string S = "memory:"; 6262 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 6263 S += "stack,"; 6264 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 6265 S += "constant,"; 6266 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 6267 S += "internal global,"; 6268 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 6269 S += "external global,"; 6270 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 6271 S += "argument,"; 6272 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 6273 S += "inaccessible,"; 6274 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 6275 S += "malloced,"; 6276 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 6277 S += "unknown,"; 6278 S.pop_back(); 6279 return S; 6280 } 6281 6282 namespace { 6283 struct AAMemoryLocationImpl : public AAMemoryLocation { 6284 6285 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 6286 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 6287 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6288 AccessKind2Accesses[u] = nullptr; 6289 } 6290 6291 ~AAMemoryLocationImpl() { 6292 // The AccessSets are allocated via a BumpPtrAllocator, we call 6293 // the destructor manually. 6294 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 6295 if (AccessKind2Accesses[u]) 6296 AccessKind2Accesses[u]->~AccessSet(); 6297 } 6298 6299 /// See AbstractAttribute::initialize(...). 6300 void initialize(Attributor &A) override { 6301 intersectAssumedBits(BEST_STATE); 6302 getKnownStateFromValue(A, getIRPosition(), getState()); 6303 IRAttribute::initialize(A); 6304 } 6305 6306 /// Return the memory behavior information encoded in the IR for \p IRP. 6307 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 6308 BitIntegerState &State, 6309 bool IgnoreSubsumingPositions = false) { 6310 // For internal functions we ignore `argmemonly` and 6311 // `inaccessiblememorargmemonly` as we might break it via interprocedural 6312 // constant propagation. It is unclear if this is the best way but it is 6313 // unlikely this will cause real performance problems. If we are deriving 6314 // attributes for the anchor function we even remove the attribute in 6315 // addition to ignoring it. 
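// E.g., interprocedural constant propagation could replace a pointer argument of an internal function with a global, at which point an earlier `argmemonly` deduction would no longer hold.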
6316 bool UseArgMemOnly = true; 6317 Function *AnchorFn = IRP.getAnchorScope(); 6318 if (AnchorFn && A.isRunOn(*AnchorFn)) 6319 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 6320 6321 SmallVector<Attribute, 2> Attrs; 6322 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6323 for (const Attribute &Attr : Attrs) { 6324 switch (Attr.getKindAsEnum()) { 6325 case Attribute::ReadNone: 6326 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 6327 break; 6328 case Attribute::InaccessibleMemOnly: 6329 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 6330 break; 6331 case Attribute::ArgMemOnly: 6332 if (UseArgMemOnly) 6333 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 6334 else 6335 IRP.removeAttrs({Attribute::ArgMemOnly}); 6336 break; 6337 case Attribute::InaccessibleMemOrArgMemOnly: 6338 if (UseArgMemOnly) 6339 State.addKnownBits(inverseLocation( 6340 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 6341 else 6342 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 6343 break; 6344 default: 6345 llvm_unreachable("Unexpected attribute!"); 6346 } 6347 } 6348 } 6349 6350 /// See AbstractAttribute::getDeducedAttributes(...). 6351 void getDeducedAttributes(LLVMContext &Ctx, 6352 SmallVectorImpl<Attribute> &Attrs) const override { 6353 assert(Attrs.size() == 0); 6354 if (isAssumedReadNone()) { 6355 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6356 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 6357 if (isAssumedInaccessibleMemOnly()) 6358 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 6359 else if (isAssumedArgMemOnly()) 6360 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 6361 else if (isAssumedInaccessibleOrArgMemOnly()) 6362 Attrs.push_back( 6363 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 6364 } 6365 assert(Attrs.size() <= 1); 6366 } 6367 6368 /// See AbstractAttribute::manifest(...). 6369 ChangeStatus manifest(Attributor &A) override { 6370 const IRPosition &IRP = getIRPosition(); 6371 6372 // Check if we would improve the existing attributes first. 6373 SmallVector<Attribute, 4> DeducedAttrs; 6374 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6375 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6376 return IRP.hasAttr(Attr.getKindAsEnum(), 6377 /* IgnoreSubsumingPositions */ true); 6378 })) 6379 return ChangeStatus::UNCHANGED; 6380 6381 // Clear existing attributes. 6382 IRP.removeAttrs(AttrKinds); 6383 if (isAssumedReadNone()) 6384 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 6385 6386 // Use the generic manifest method. 6387 return IRAttribute::manifest(A); 6388 } 6389 6390 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
6391 bool checkForAllAccessesToMemoryKind( 6392 function_ref<bool(const Instruction *, const Value *, AccessKind, 6393 MemoryLocationsKind)> 6394 Pred, 6395 MemoryLocationsKind RequestedMLK) const override { 6396 if (!isValidState()) 6397 return false; 6398 6399 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 6400 if (AssumedMLK == NO_LOCATIONS) 6401 return true; 6402 6403 unsigned Idx = 0; 6404 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 6405 CurMLK *= 2, ++Idx) { 6406 if (CurMLK & RequestedMLK) 6407 continue; 6408 6409 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 6410 for (const AccessInfo &AI : *Accesses) 6411 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 6412 return false; 6413 } 6414 6415 return true; 6416 } 6417 6418 ChangeStatus indicatePessimisticFixpoint() override { 6419 // If we give up and indicate a pessimistic fixpoint this instruction will 6420 // become an access for all potential access kinds: 6421 // TODO: Add pointers for argmemonly and globals to improve the results of 6422 // checkForAllAccessesToMemoryKind. 6423 bool Changed = false; 6424 MemoryLocationsKind KnownMLK = getKnown(); 6425 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 6426 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 6427 if (!(CurMLK & KnownMLK)) 6428 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 6429 getAccessKindFromInst(I)); 6430 return AAMemoryLocation::indicatePessimisticFixpoint(); 6431 } 6432 6433 protected: 6434 /// Helper struct to tie together an instruction that has a read or write 6435 /// effect with the pointer it accesses (if any). 6436 struct AccessInfo { 6437 6438 /// The instruction that caused the access. 6439 const Instruction *I; 6440 6441 /// The base pointer that is accessed, or null if unknown. 6442 const Value *Ptr; 6443 6444 /// The kind of access (read/write/read+write). 6445 AccessKind Kind; 6446 6447 bool operator==(const AccessInfo &RHS) const { 6448 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 6449 } 6450 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 6451 if (LHS.I != RHS.I) 6452 return LHS.I < RHS.I; 6453 if (LHS.Ptr != RHS.Ptr) 6454 return LHS.Ptr < RHS.Ptr; 6455 if (LHS.Kind != RHS.Kind) 6456 return LHS.Kind < RHS.Kind; 6457 return false; 6458 } 6459 }; 6460 6461 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 6462 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 6463 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 6464 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 6465 6466 /// Categorize the pointer arguments of CB that might access memory in 6467 /// AccessedLoc and update the state and access map accordingly. 6468 void 6469 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB, 6470 AAMemoryLocation::StateType &AccessedLocs, 6471 bool &Changed); 6472 6473 /// Return the kind(s) of location that may be accessed by \p V. 6474 AAMemoryLocation::MemoryLocationsKind 6475 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 6476 6477 /// Return the access kind as determined by \p I. 6478 AccessKind getAccessKindFromInst(const Instruction *I) { 6479 AccessKind AK = READ_WRITE; 6480 if (I) { 6481 AK = I->mayReadFromMemory() ? READ : NONE; 6482 AK = AccessKind(AK | (I->mayWriteToMemory() ? 
WRITE : NONE)); 6483 } 6484 return AK; 6485 } 6486 6487 /// Update the state \p State and the AccessKind2Accesses given that \p I is 6488 /// an access of kind \p AK to a \p MLK memory location with the access 6489 /// pointer \p Ptr. 6490 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State, 6491 MemoryLocationsKind MLK, const Instruction *I, 6492 const Value *Ptr, bool &Changed, 6493 AccessKind AK = READ_WRITE) { 6494 6495 assert(isPowerOf2_32(MLK) && "Expected a single location set!"); 6496 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)]; 6497 if (!Accesses) 6498 Accesses = new (Allocator) AccessSet(); 6499 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second; 6500 State.removeAssumedBits(MLK); 6501 } 6502 6503 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or 6504 /// arguments, and update the state and access map accordingly. 6505 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr, 6506 AAMemoryLocation::StateType &State, bool &Changed); 6507 6508 /// Used to allocate access sets. 6509 BumpPtrAllocator &Allocator; 6510 6511 /// The set of IR attributes AAMemoryLocation deals with. 6512 static const Attribute::AttrKind AttrKinds[4]; 6513 }; 6514 6515 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = { 6516 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly, 6517 Attribute::InaccessibleMemOrArgMemOnly}; 6518 6519 void AAMemoryLocationImpl::categorizePtrValue( 6520 Attributor &A, const Instruction &I, const Value &Ptr, 6521 AAMemoryLocation::StateType &State, bool &Changed) { 6522 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for " 6523 << Ptr << " [" 6524 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n"); 6525 6526 auto StripGEPCB = [](Value *V) -> Value * { 6527 auto *GEP = dyn_cast<GEPOperator>(V); 6528 while (GEP) { 6529 V = GEP->getPointerOperand(); 6530 GEP = dyn_cast<GEPOperator>(V); 6531 } 6532 return V; 6533 }; 6534 6535 auto VisitValueCB = [&](Value &V, const Instruction *, 6536 AAMemoryLocation::StateType &T, 6537 bool Stripped) -> bool { 6538 MemoryLocationsKind MLK = NO_LOCATIONS; 6539 assert(!isa<GEPOperator>(V) && "GEPs should have been stripped."); 6540 if (isa<UndefValue>(V)) 6541 return true; 6542 if (auto *Arg = dyn_cast<Argument>(&V)) { 6543 if (Arg->hasByValAttr()) 6544 MLK = NO_LOCAL_MEM; 6545 else 6546 MLK = NO_ARGUMENT_MEM; 6547 } else if (auto *GV = dyn_cast<GlobalValue>(&V)) { 6548 if (GV->hasLocalLinkage()) 6549 MLK = NO_GLOBAL_INTERNAL_MEM; 6550 else 6551 MLK = NO_GLOBAL_EXTERNAL_MEM; 6552 } else if (isa<ConstantPointerNull>(V) && 6553 !NullPointerIsDefined(getAssociatedFunction(), 6554 V.getType()->getPointerAddressSpace())) { 6555 return true; 6556 } else if (isa<AllocaInst>(V)) { 6557 MLK = NO_LOCAL_MEM; 6558 } else if (const auto *CB = dyn_cast<CallBase>(&V)) { 6559 const auto &NoAliasAA = 6560 A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB)); 6561 if (NoAliasAA.isAssumedNoAlias()) 6562 MLK = NO_MALLOCED_MEM; 6563 else 6564 MLK = NO_UNKOWN_MEM; 6565 } else { 6566 MLK = NO_UNKOWN_MEM; 6567 } 6568 6569 assert(MLK != NO_LOCATIONS && "No location specified!"); 6570 updateStateAndAccessesMap(T, MLK, &I, &V, Changed, 6571 getAccessKindFromInst(&I)); 6572 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: " 6573 << V << " -> " << getMemoryLocationsAsStr(T.getAssumed()) 6574 << "\n"); 6575 return true; 6576 }; 6577 6578 if (!genericValueTraversal<AAMemoryLocation, 
AAMemoryLocation::StateType>( 6579 A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(), 6580 /* UseValueSimplify */ true, 6581 /* MaxValues */ 32, StripGEPCB)) { 6582 LLVM_DEBUG( 6583 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n"); 6584 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed, 6585 getAccessKindFromInst(&I)); 6586 } else { 6587 LLVM_DEBUG( 6588 dbgs() 6589 << "[AAMemoryLocation] Accessed locations with pointer locations: " 6590 << getMemoryLocationsAsStr(State.getAssumed()) << "\n"); 6591 } 6592 } 6593 6594 void AAMemoryLocationImpl::categorizeArgumentPointerLocations( 6595 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs, 6596 bool &Changed) { 6597 for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) { 6598 6599 // Skip non-pointer arguments. 6600 const Value *ArgOp = CB.getArgOperand(ArgNo); 6601 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 6602 continue; 6603 6604 // Skip readnone arguments. 6605 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo); 6606 const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>( 6607 *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL); 6608 6609 if (ArgOpMemLocationAA.isAssumedReadNone()) 6610 continue; 6611 6612 // Categorize potentially accessed pointer arguments as if there was an 6613 // access instruction with them as the pointer. 6614 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed); 6615 } 6616 } 6617 6618 AAMemoryLocation::MemoryLocationsKind 6619 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I, 6620 bool &Changed) { 6621 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for " 6622 << I << "\n"); 6623 6624 AAMemoryLocation::StateType AccessedLocs; 6625 AccessedLocs.intersectAssumedBits(NO_LOCATIONS); 6626 6627 if (auto *CB = dyn_cast<CallBase>(&I)) { 6628 6629 // First check if we assume any memory access is visible. 6630 const auto &CBMemLocationAA = 6631 A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB)); 6632 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I 6633 << " [" << CBMemLocationAA << "]\n"); 6634 6635 if (CBMemLocationAA.isAssumedReadNone()) 6636 return NO_LOCATIONS; 6637 6638 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) { 6639 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr, 6640 Changed, getAccessKindFromInst(&I)); 6641 return AccessedLocs.getAssumed(); 6642 } 6643 6644 uint32_t CBAssumedNotAccessedLocs = 6645 CBMemLocationAA.getAssumedNotAccessedLocation(); 6646 6647 // Set the argmemonly and global bits as we handle them separately below. 6648 uint32_t CBAssumedNotAccessedLocsNoArgMem = 6649 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 6650 6651 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 6652 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 6653 continue; 6654 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 6655 getAccessKindFromInst(&I)); 6656 } 6657 6658 // Now handle global memory if it might be accessed. This is slightly tricky 6659 // as NO_GLOBAL_MEM has multiple bits set.
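// (NO_GLOBAL_MEM is the union of NO_GLOBAL_INTERNAL_MEM and NO_GLOBAL_EXTERNAL_MEM, so instead of testing a single bit we ask the callee for its individual global accesses below.)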
6660 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 6661 if (HasGlobalAccesses) { 6662 auto AccessPred = [&](const Instruction *, const Value *Ptr, 6663 AccessKind Kind, MemoryLocationsKind MLK) { 6664 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 6665 getAccessKindFromInst(&I)); 6666 return true; 6667 }; 6668 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 6669 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 6670 return AccessedLocs.getWorstState(); 6671 } 6672 6673 LLVM_DEBUG( 6674 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 6675 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6676 6677 // Now handle argument memory if it might be accessed. 6678 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 6679 if (HasArgAccesses) 6680 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed); 6681 6682 LLVM_DEBUG( 6683 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 6684 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 6685 6686 return AccessedLocs.getAssumed(); 6687 } 6688 6689 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 6690 LLVM_DEBUG( 6691 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 6692 << I << " [" << *Ptr << "]\n"); 6693 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 6694 return AccessedLocs.getAssumed(); 6695 } 6696 6697 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 6698 << I << "\n"); 6699 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 6700 getAccessKindFromInst(&I)); 6701 return AccessedLocs.getAssumed(); 6702 } 6703 6704 /// An AA to represent the memory behavior function attributes. 6705 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 6706 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 6707 : AAMemoryLocationImpl(IRP, A) {} 6708 6709 /// See AbstractAttribute::updateImpl(Attributor &A). 6710 virtual ChangeStatus updateImpl(Attributor &A) override { 6711 6712 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 6713 *this, getIRPosition(), /* TrackDependence */ false); 6714 if (MemBehaviorAA.isAssumedReadNone()) { 6715 if (MemBehaviorAA.isKnownReadNone()) 6716 return indicateOptimisticFixpoint(); 6717 assert(isAssumedReadNone() && 6718 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 6719 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 6720 return ChangeStatus::UNCHANGED; 6721 } 6722 6723 // The current assumed state used to determine a change. 6724 auto AssumedState = getAssumed(); 6725 bool Changed = false; 6726 6727 auto CheckRWInst = [&](Instruction &I) { 6728 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed); 6729 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I 6730 << ": " << getMemoryLocationsAsStr(MLK) << "\n"); 6731 removeAssumedBits(inverseLocation(MLK, false, false)); 6732 // Stop once only the valid bit set in the *not assumed location*, thus 6733 // once we don't actually exclude any memory locations in the state. 6734 return getAssumedNotAccessedLocation() != VALID_STATE; 6735 }; 6736 6737 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this)) 6738 return indicatePessimisticFixpoint(); 6739 6740 Changed |= AssumedState != getAssumed(); 6741 return Changed ? 
ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6742 } 6743 6744 /// See AbstractAttribute::trackStatistics() 6745 void trackStatistics() const override { 6746 if (isAssumedReadNone()) 6747 STATS_DECLTRACK_FN_ATTR(readnone) 6748 else if (isAssumedArgMemOnly()) 6749 STATS_DECLTRACK_FN_ATTR(argmemonly) 6750 else if (isAssumedInaccessibleMemOnly()) 6751 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly) 6752 else if (isAssumedInaccessibleOrArgMemOnly()) 6753 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly) 6754 } 6755 }; 6756 6757 /// AAMemoryLocation attribute for call sites. 6758 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl { 6759 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A) 6760 : AAMemoryLocationImpl(IRP, A) {} 6761 6762 /// See AbstractAttribute::initialize(...). 6763 void initialize(Attributor &A) override { 6764 AAMemoryLocationImpl::initialize(A); 6765 Function *F = getAssociatedFunction(); 6766 if (!F || !A.isFunctionIPOAmendable(*F)) { 6767 indicatePessimisticFixpoint(); 6768 return; 6769 } 6770 } 6771 6772 /// See AbstractAttribute::updateImpl(...). 6773 ChangeStatus updateImpl(Attributor &A) override { 6774 // TODO: Once we have call site specific value information we can provide 6775 // call site specific liveness information and then it makes 6776 // sense to specialize attributes for call site arguments instead of 6777 // redirecting requests to the callee argument. 6778 Function *F = getAssociatedFunction(); 6779 const IRPosition &FnPos = IRPosition::function(*F); 6780 auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos); 6781 bool Changed = false; 6782 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 6783 AccessKind Kind, MemoryLocationsKind MLK) { 6784 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed, 6785 getAccessKindFromInst(I)); 6786 return true; 6787 }; 6788 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS)) 6789 return indicatePessimisticFixpoint(); 6790 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 6791 } 6792 6793 /// See AbstractAttribute::trackStatistics() 6794 void trackStatistics() const override { 6795 if (isAssumedReadNone()) 6796 STATS_DECLTRACK_CS_ATTR(readnone) 6797 } 6798 }; 6799 6800 /// ------------------ Value Constant Range Attribute ------------------------- 6801 6802 struct AAValueConstantRangeImpl : AAValueConstantRange { 6803 using StateType = IntegerRangeState; 6804 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A) 6805 : AAValueConstantRange(IRP, A) {} 6806 6807 /// See AbstractAttribute::getAsStr(). 6808 const std::string getAsStr() const override { 6809 std::string Str; 6810 llvm::raw_string_ostream OS(Str); 6811 OS << "range(" << getBitWidth() << ")<"; 6812 getKnown().print(OS); 6813 OS << " / "; 6814 getAssumed().print(OS); 6815 OS << ">"; 6816 return OS.str(); 6817 } 6818 6819 /// Helper function to get a SCEV expr for the associated value at program 6820 /// point \p I.
6821 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const { 6822 if (!getAnchorScope()) 6823 return nullptr; 6824 6825 ScalarEvolution *SE = 6826 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6827 *getAnchorScope()); 6828 6829 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>( 6830 *getAnchorScope()); 6831 6832 if (!SE || !LI) 6833 return nullptr; 6834 6835 const SCEV *S = SE->getSCEV(&getAssociatedValue()); 6836 if (!I) 6837 return S; 6838 6839 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent())); 6840 } 6841 6842 /// Helper function to get a range from SCEV for the associated value at 6843 /// program point \p I. 6844 ConstantRange getConstantRangeFromSCEV(Attributor &A, 6845 const Instruction *I = nullptr) const { 6846 if (!getAnchorScope()) 6847 return getWorstState(getBitWidth()); 6848 6849 ScalarEvolution *SE = 6850 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 6851 *getAnchorScope()); 6852 6853 const SCEV *S = getSCEV(A, I); 6854 if (!SE || !S) 6855 return getWorstState(getBitWidth()); 6856 6857 return SE->getUnsignedRange(S); 6858 } 6859 6860 /// Helper function to get a range from LVI for the associated value at 6861 /// program point \p I. 6862 ConstantRange 6863 getConstantRangeFromLVI(Attributor &A, 6864 const Instruction *CtxI = nullptr) const { 6865 if (!getAnchorScope()) 6866 return getWorstState(getBitWidth()); 6867 6868 LazyValueInfo *LVI = 6869 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 6870 *getAnchorScope()); 6871 6872 if (!LVI || !CtxI) 6873 return getWorstState(getBitWidth()); 6874 return LVI->getConstantRange(&getAssociatedValue(), 6875 const_cast<BasicBlock *>(CtxI->getParent()), 6876 const_cast<Instruction *>(CtxI)); 6877 } 6878 6879 /// See AAValueConstantRange::getKnownConstantRange(..). 6880 ConstantRange 6881 getKnownConstantRange(Attributor &A, 6882 const Instruction *CtxI = nullptr) const override { 6883 if (!CtxI || CtxI == getCtxI()) 6884 return getKnown(); 6885 6886 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6887 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6888 return getKnown().intersectWith(SCEVR).intersectWith(LVIR); 6889 } 6890 6891 /// See AAValueConstantRange::getAssumedConstantRange(..). 6892 ConstantRange 6893 getAssumedConstantRange(Attributor &A, 6894 const Instruction *CtxI = nullptr) const override { 6895 // TODO: Make SCEV use Attributor assumption. 6896 // We may be able to bound a variable range via assumptions in 6897 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to 6898 // evolve to x^2 + x, then we can say that y is in [2, 12]. 6899 6900 if (!CtxI || CtxI == getCtxI()) 6901 return getAssumed(); 6902 6903 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 6904 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 6905 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR); 6906 } 6907 6908 /// See AbstractAttribute::initialize(..). 6909 void initialize(Attributor &A) override { 6910 // Intersect a range given by SCEV. 6911 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 6912 6913 // Intersect a range given by LVI. 6914 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 6915 } 6916 6917 /// Helper function to create MDNode for range metadata. 
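/// The node has the same shape as IR !range metadata, i.e., a pair of constants describing the half-open interval [Lower, Upper), e.g., !{i32 0, i32 10} for the values 0..9.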
6918 static MDNode * 6919 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx, 6920 const ConstantRange &AssumedConstantRange) { 6921 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get( 6922 Ty, AssumedConstantRange.getLower())), 6923 ConstantAsMetadata::get(ConstantInt::get( 6924 Ty, AssumedConstantRange.getUpper()))}; 6925 return MDNode::get(Ctx, LowAndHigh); 6926 } 6927 6928 /// Return true if \p Assumed is included in \p KnownRanges. 6929 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) { 6930 6931 if (Assumed.isFullSet()) 6932 return false; 6933 6934 if (!KnownRanges) 6935 return true; 6936 6937 // If multiple ranges are annotated in IR, we give up annotating the assumed 6938 // range for now. 6939 6940 // TODO: If there exists a known range which contains the assumed range, we 6941 // can say the assumed range is better. 6942 if (KnownRanges->getNumOperands() > 2) 6943 return false; 6944 6945 ConstantInt *Lower = 6946 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0)); 6947 ConstantInt *Upper = 6948 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1)); 6949 6950 ConstantRange Known(Lower->getValue(), Upper->getValue()); 6951 return Known.contains(Assumed) && Known != Assumed; 6952 } 6953 6954 /// Helper function to set range metadata. 6955 static bool 6956 setRangeMetadataIfisBetterRange(Instruction *I, 6957 const ConstantRange &AssumedConstantRange) { 6958 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 6959 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 6960 if (!AssumedConstantRange.isEmptySet()) { 6961 I->setMetadata(LLVMContext::MD_range, 6962 getMDNodeForConstantRange(I->getType(), I->getContext(), 6963 AssumedConstantRange)); 6964 return true; 6965 } 6966 } 6967 return false; 6968 } 6969 6970 /// See AbstractAttribute::manifest() 6971 ChangeStatus manifest(Attributor &A) override { 6972 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6973 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 6974 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 6975 6976 auto &V = getAssociatedValue(); 6977 if (!AssumedConstantRange.isEmptySet() && 6978 !AssumedConstantRange.isSingleElement()) { 6979 if (Instruction *I = dyn_cast<Instruction>(&V)) 6980 if (isa<CallInst>(I) || isa<LoadInst>(I)) 6981 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 6982 Changed = ChangeStatus::CHANGED; 6983 } 6984 6985 return Changed; 6986 } 6987 }; 6988 6989 struct AAValueConstantRangeArgument final 6990 : AAArgumentFromCallSiteArguments< 6991 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> { 6992 using Base = AAArgumentFromCallSiteArguments< 6993 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>; 6994 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 6995 : Base(IRP, A) {} 6996 6997 /// See AbstractAttribute::initialize(..).
6998 void initialize(Attributor &A) override { 6999 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 7000 indicatePessimisticFixpoint(); 7001 } else { 7002 Base::initialize(A); 7003 } 7004 } 7005 7006 /// See AbstractAttribute::trackStatistics() 7007 void trackStatistics() const override { 7008 STATS_DECLTRACK_ARG_ATTR(value_range) 7009 } 7010 }; 7011 7012 struct AAValueConstantRangeReturned 7013 : AAReturnedFromReturnedValues<AAValueConstantRange, 7014 AAValueConstantRangeImpl> { 7015 using Base = AAReturnedFromReturnedValues<AAValueConstantRange, 7016 AAValueConstantRangeImpl>; 7017 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 7018 : Base(IRP, A) {} 7019 7020 /// See AbstractAttribute::initialize(...). 7021 void initialize(Attributor &A) override {} 7022 7023 /// See AbstractAttribute::trackStatistics() 7024 void trackStatistics() const override { 7025 STATS_DECLTRACK_FNRET_ATTR(value_range) 7026 } 7027 }; 7028 7029 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 7030 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 7031 : AAValueConstantRangeImpl(IRP, A) {} 7032 7033 /// See AbstractAttribute::initialize(...). 7034 void initialize(Attributor &A) override { 7035 AAValueConstantRangeImpl::initialize(A); 7036 Value &V = getAssociatedValue(); 7037 7038 if (auto *C = dyn_cast<ConstantInt>(&V)) { 7039 unionAssumed(ConstantRange(C->getValue())); 7040 indicateOptimisticFixpoint(); 7041 return; 7042 } 7043 7044 if (isa<UndefValue>(&V)) { 7045 // Collapse the undef state to 0. 7046 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 7047 indicateOptimisticFixpoint(); 7048 return; 7049 } 7050 7051 if (isa<CallBase>(&V)) 7052 return; 7053 7054 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 7055 return; 7056 // If it is a load instruction with range metadata, use it. 7057 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 7058 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 7059 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 7060 return; 7061 } 7062 7063 // We can work with PHI and select instruction as we traverse their operands 7064 // during update. 7065 if (isa<SelectInst>(V) || isa<PHINode>(V)) 7066 return; 7067 7068 // Otherwise we give up. 7069 indicatePessimisticFixpoint(); 7070 7071 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 7072 << getAssociatedValue() << "\n"); 7073 } 7074 7075 bool calculateBinaryOperator( 7076 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 7077 const Instruction *CtxI, 7078 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 7079 Value *LHS = BinOp->getOperand(0); 7080 Value *RHS = BinOp->getOperand(1); 7081 // TODO: Allow non integers as well. 7082 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 7083 return false; 7084 7085 auto &LHSAA = 7086 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 7087 QuerriedAAs.push_back(&LHSAA); 7088 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 7089 7090 auto &RHSAA = 7091 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 7092 QuerriedAAs.push_back(&RHSAA); 7093 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 7094 7095 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 7096 7097 T.unionAssumed(AssumedRange); 7098 7099 // TODO: Track a known state too. 
7100 7101 return T.isValidState(); 7102 } 7103 7104 bool calculateCastInst( 7105 Attributor &A, CastInst *CastI, IntegerRangeState &T, 7106 const Instruction *CtxI, 7107 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 7108 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 7109 // TODO: Allow non integers as well. 7110 Value &OpV = *CastI->getOperand(0); 7111 if (!OpV.getType()->isIntegerTy()) 7112 return false; 7113 7114 auto &OpAA = 7115 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV)); 7116 QuerriedAAs.push_back(&OpAA); 7117 T.unionAssumed( 7118 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 7119 return T.isValidState(); 7120 } 7121 7122 bool 7123 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 7124 const Instruction *CtxI, 7125 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 7126 Value *LHS = CmpI->getOperand(0); 7127 Value *RHS = CmpI->getOperand(1); 7128 // TODO: Allow non integers as well. 7129 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 7130 return false; 7131 7132 auto &LHSAA = 7133 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS)); 7134 QuerriedAAs.push_back(&LHSAA); 7135 auto &RHSAA = 7136 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS)); 7137 QuerriedAAs.push_back(&RHSAA); 7138 7139 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 7140 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 7141 7142 // If one of them is an empty set, we can't decide. 7143 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 7144 return true; 7145 7146 bool MustTrue = false, MustFalse = false; 7147 7148 auto AllowedRegion = 7149 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 7150 7151 auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion( 7152 CmpI->getPredicate(), RHSAARange); 7153 7154 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 7155 MustFalse = true; 7156 7157 if (SatisfyingRegion.contains(LHSAARange)) 7158 MustTrue = true; 7159 7160 assert((!MustTrue || !MustFalse) && 7161 "Either MustTrue or MustFalse should be false!"); 7162 7163 if (MustTrue) 7164 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 7165 else if (MustFalse) 7166 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 7167 else 7168 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 7169 7170 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 7171 << " " << RHSAA << "\n"); 7172 7173 // TODO: Track a known state too. 7174 return T.isValidState(); 7175 } 7176 7177 /// See AbstractAttribute::updateImpl(...). 7178 ChangeStatus updateImpl(Attributor &A) override { 7179 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 7180 IntegerRangeState &T, bool Stripped) -> bool { 7181 Instruction *I = dyn_cast<Instruction>(&V); 7182 if (!I || isa<CallBase>(I)) { 7183 7184 // If the value is not an instruction, we query the AA from the Attributor. 7185 const auto &AA = 7186 A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V)); 7187 7188 // The clamp operator is not used here so that we can utilize the program point CtxI.
7189 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI)); 7190 7191 return T.isValidState(); 7192 } 7193 7194 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs; 7195 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) { 7196 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs)) 7197 return false; 7198 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) { 7199 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs)) 7200 return false; 7201 } else if (auto *CastI = dyn_cast<CastInst>(I)) { 7202 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs)) 7203 return false; 7204 } else { 7205 // Give up with other instructions. 7206 // TODO: Add other instructions 7207 7208 T.indicatePessimisticFixpoint(); 7209 return false; 7210 } 7211 7212 // Catch circular reasoning in a pessimistic way for now. 7213 // TODO: Check how the range evolves and if we stripped anything, see also 7214 // AADereferenceable or AAAlign for similar situations. 7215 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) { 7216 if (QueriedAA != this) 7217 continue; 7218 // If we are in a steady state we do not need to worry. 7219 if (T.getAssumed() == getState().getAssumed()) 7220 continue; 7221 T.indicatePessimisticFixpoint(); 7222 } 7223 7224 return T.isValidState(); 7225 }; 7226 7227 IntegerRangeState T(getBitWidth()); 7228 7229 if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>( 7230 A, getIRPosition(), *this, T, VisitValueCB, getCtxI(), 7231 /* UseValueSimplify */ false)) 7232 return indicatePessimisticFixpoint(); 7233 7234 return clampStateAndIndicateChange(getState(), T); 7235 } 7236 7237 /// See AbstractAttribute::trackStatistics() 7238 void trackStatistics() const override { 7239 STATS_DECLTRACK_FLOATING_ATTR(value_range) 7240 } 7241 }; 7242 7243 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 7244 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 7245 : AAValueConstantRangeImpl(IRP, A) {} 7246 7247 /// See AbstractAttribute::updateImpl(...). 7248 ChangeStatus updateImpl(Attributor &A) override { 7249 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 7250 "not be called"); 7251 } 7252 7253 /// See AbstractAttribute::trackStatistics() 7254 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 7255 }; 7256 7257 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 7258 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 7259 : AAValueConstantRangeFunction(IRP, A) {} 7260 7261 /// See AbstractAttribute::trackStatistics() 7262 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 7263 }; 7264 7265 struct AAValueConstantRangeCallSiteReturned 7266 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7267 AAValueConstantRangeImpl> { 7268 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 7269 : AACallSiteReturnedFromReturned<AAValueConstantRange, 7270 AAValueConstantRangeImpl>(IRP, A) {} 7271 7272 /// See AbstractAttribute::initialize(...). 7273 void initialize(Attributor &A) override { 7274 // If it is a call instruction with range metadata, use the metadata.
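// E.g., `%r = call i32 @f(), !range !0` with `!0 = !{i32 0, i32 8}` lets us start from the known range [0, 8).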
7275 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 7276 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 7277 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 7278 7279 AAValueConstantRangeImpl::initialize(A); 7280 } 7281 7282 /// See AbstractAttribute::trackStatistics() 7283 void trackStatistics() const override { 7284 STATS_DECLTRACK_CSRET_ATTR(value_range) 7285 } 7286 }; 7287 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 7288 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 7289 : AAValueConstantRangeFloating(IRP, A) {} 7290 7291 /// See AbstractAttribute::trackStatistics() 7292 void trackStatistics() const override { 7293 STATS_DECLTRACK_CSARG_ATTR(value_range) 7294 } 7295 }; 7296 7297 /// ------------------ Potential Values Attribute ------------------------- 7298 7299 struct AAPotentialValuesImpl : AAPotentialValues { 7300 using StateType = PotentialConstantIntValuesState; 7301 7302 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A) 7303 : AAPotentialValues(IRP, A) {} 7304 7305 /// See AbstractAttribute::getAsStr(). 7306 const std::string getAsStr() const override { 7307 std::string Str; 7308 llvm::raw_string_ostream OS(Str); 7309 OS << getState(); 7310 return OS.str(); 7311 } 7312 7313 /// See AbstractAttribute::updateImpl(...). 7314 ChangeStatus updateImpl(Attributor &A) override { 7315 return indicatePessimisticFixpoint(); 7316 } 7317 }; 7318 7319 struct AAPotentialValuesArgument final 7320 : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 7321 PotentialConstantIntValuesState> { 7322 using Base = 7323 AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 7324 PotentialConstantIntValuesState>; 7325 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A) 7326 : Base(IRP, A) {} 7327 7328 /// See AbstractAttribute::initialize(..). 7329 void initialize(Attributor &A) override { 7330 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 7331 indicatePessimisticFixpoint(); 7332 } else { 7333 Base::initialize(A); 7334 } 7335 } 7336 7337 /// See AbstractAttribute::trackStatistics() 7338 void trackStatistics() const override { 7339 STATS_DECLTRACK_ARG_ATTR(potential_values) 7340 } 7341 }; 7342 7343 struct AAPotentialValuesReturned 7344 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { 7345 using Base = 7346 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; 7347 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) 7348 : Base(IRP, A) {} 7349 7350 /// See AbstractAttribute::trackStatistics() 7351 void trackStatistics() const override { 7352 STATS_DECLTRACK_FNRET_ATTR(potential_values) 7353 } 7354 }; 7355 7356 struct AAPotentialValuesFloating : AAPotentialValuesImpl { 7357 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) 7358 : AAPotentialValuesImpl(IRP, A) {} 7359 7360 /// See AbstractAttribute::initialize(..). 
7361 void initialize(Attributor &A) override { 7362 Value &V = getAssociatedValue(); 7363 7364 if (auto *C = dyn_cast<ConstantInt>(&V)) { 7365 unionAssumed(C->getValue()); 7366 indicateOptimisticFixpoint(); 7367 return; 7368 } 7369 7370 if (isa<UndefValue>(&V)) { 7371 unionAssumedWithUndef(); 7372 indicateOptimisticFixpoint(); 7373 return; 7374 } 7375 7376 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) 7377 return; 7378 7379 if (isa<SelectInst>(V) || isa<PHINode>(V)) 7380 return; 7381 7382 indicatePessimisticFixpoint(); 7383 7384 LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: " 7385 << getAssociatedValue() << "\n"); 7386 } 7387 7388 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, 7389 const APInt &RHS) { 7390 ICmpInst::Predicate Pred = ICI->getPredicate(); 7391 switch (Pred) { 7392 case ICmpInst::ICMP_UGT: 7393 return LHS.ugt(RHS); 7394 case ICmpInst::ICMP_SGT: 7395 return LHS.sgt(RHS); 7396 case ICmpInst::ICMP_EQ: 7397 return LHS.eq(RHS); 7398 case ICmpInst::ICMP_UGE: 7399 return LHS.uge(RHS); 7400 case ICmpInst::ICMP_SGE: 7401 return LHS.sge(RHS); 7402 case ICmpInst::ICMP_ULT: 7403 return LHS.ult(RHS); 7404 case ICmpInst::ICMP_SLT: 7405 return LHS.slt(RHS); 7406 case ICmpInst::ICMP_NE: 7407 return LHS.ne(RHS); 7408 case ICmpInst::ICMP_ULE: 7409 return LHS.ule(RHS); 7410 case ICmpInst::ICMP_SLE: 7411 return LHS.sle(RHS); 7412 default: 7413 llvm_unreachable("Invalid ICmp predicate!"); 7414 } 7415 } 7416 7417 static APInt calculateCastInst(const CastInst *CI, const APInt &Src, 7418 uint32_t ResultBitWidth) { 7419 Instruction::CastOps CastOp = CI->getOpcode(); 7420 switch (CastOp) { 7421 default: 7422 llvm_unreachable("unsupported or not integer cast"); 7423 case Instruction::Trunc: 7424 return Src.trunc(ResultBitWidth); 7425 case Instruction::SExt: 7426 return Src.sext(ResultBitWidth); 7427 case Instruction::ZExt: 7428 return Src.zext(ResultBitWidth); 7429 case Instruction::BitCast: 7430 return Src; 7431 } 7432 } 7433 7434 static APInt calculateBinaryOperator(const BinaryOperator *BinOp, 7435 const APInt &LHS, const APInt &RHS, 7436 bool &SkipOperation, bool &Unsupported) { 7437 Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); 7438 // Unsupported is set to true when the binary operator is not supported. 7439 // SkipOperation is set to true when UB occur with the given operand pair 7440 // (LHS, RHS). 7441 // TODO: we should look at nsw and nuw keywords to handle operations 7442 // that create poison or undef value. 
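// E.g., udiv/sdiv/urem/srem with a zero RHS would be immediate UB, so those cases set SkipOperation below and the pair contributes no value.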
7443 switch (BinOpcode) { 7444 default: 7445 Unsupported = true; 7446 return LHS; 7447 case Instruction::Add: 7448 return LHS + RHS; 7449 case Instruction::Sub: 7450 return LHS - RHS; 7451 case Instruction::Mul: 7452 return LHS * RHS; 7453 case Instruction::UDiv: 7454 if (RHS.isNullValue()) { 7455 SkipOperation = true; 7456 return LHS; 7457 } 7458 return LHS.udiv(RHS); 7459 case Instruction::SDiv: 7460 if (RHS.isNullValue()) { 7461 SkipOperation = true; 7462 return LHS; 7463 } 7464 return LHS.sdiv(RHS); 7465 case Instruction::URem: 7466 if (RHS.isNullValue()) { 7467 SkipOperation = true; 7468 return LHS; 7469 } 7470 return LHS.urem(RHS); 7471 case Instruction::SRem: 7472 if (RHS.isNullValue()) { 7473 SkipOperation = true; 7474 return LHS; 7475 } 7476 return LHS.srem(RHS); 7477 case Instruction::Shl: 7478 return LHS.shl(RHS); 7479 case Instruction::LShr: 7480 return LHS.lshr(RHS); 7481 case Instruction::AShr: 7482 return LHS.ashr(RHS); 7483 case Instruction::And: 7484 return LHS & RHS; 7485 case Instruction::Or: 7486 return LHS | RHS; 7487 case Instruction::Xor: 7488 return LHS ^ RHS; 7489 } 7490 } 7491 7492 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 7493 const APInt &LHS, const APInt &RHS) { 7494 bool SkipOperation = false; 7495 bool Unsupported = false; 7496 APInt Result = 7497 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 7498 if (Unsupported) 7499 return false; 7500 // If SkipOperation is true, we can ignore this operand pair (L, R). 7501 if (!SkipOperation) 7502 unionAssumed(Result); 7503 return isValidState(); 7504 } 7505 7506 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 7507 auto AssumedBefore = getAssumed(); 7508 Value *LHS = ICI->getOperand(0); 7509 Value *RHS = ICI->getOperand(1); 7510 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 7511 return indicatePessimisticFixpoint(); 7512 7513 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS)); 7514 if (!LHSAA.isValidState()) 7515 return indicatePessimisticFixpoint(); 7516 7517 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS)); 7518 if (!RHSAA.isValidState()) 7519 return indicatePessimisticFixpoint(); 7520 7521 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 7522 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 7523 7524 // TODO: make use of undef flag to limit potential values aggressively. 7525 bool MaybeTrue = false, MaybeFalse = false; 7526 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 7527 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 7528 // The result of any comparison between undefs can be soundly replaced 7529 // with undef. 
7530 unionAssumedWithUndef(); 7531 } else if (LHSAA.undefIsContained()) { 7532 bool MaybeTrue = false, MaybeFalse = false; 7533 for (const APInt &R : RHSAAPVS) { 7534 bool CmpResult = calculateICmpInst(ICI, Zero, R); 7535 MaybeTrue |= CmpResult; 7536 MaybeFalse |= !CmpResult; 7537 if (MaybeTrue & MaybeFalse) 7538 return indicatePessimisticFixpoint(); 7539 } 7540 } else if (RHSAA.undefIsContained()) { 7541 for (const APInt &L : LHSAAPVS) { 7542 bool CmpResult = calculateICmpInst(ICI, L, Zero); 7543 MaybeTrue |= CmpResult; 7544 MaybeFalse |= !CmpResult; 7545 if (MaybeTrue & MaybeFalse) 7546 return indicatePessimisticFixpoint(); 7547 } 7548 } else { 7549 for (const APInt &L : LHSAAPVS) { 7550 for (const APInt &R : RHSAAPVS) { 7551 bool CmpResult = calculateICmpInst(ICI, L, R); 7552 MaybeTrue |= CmpResult; 7553 MaybeFalse |= !CmpResult; 7554 if (MaybeTrue & MaybeFalse) 7555 return indicatePessimisticFixpoint(); 7556 } 7557 } 7558 } 7559 if (MaybeTrue) 7560 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 7561 if (MaybeFalse) 7562 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 7563 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 7564 : ChangeStatus::CHANGED; 7565 } 7566 7567 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 7568 auto AssumedBefore = getAssumed(); 7569 Value *LHS = SI->getTrueValue(); 7570 Value *RHS = SI->getFalseValue(); 7571 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 7572 return indicatePessimisticFixpoint(); 7573 7574 // TODO: Use assumed simplified condition value 7575 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS)); 7576 if (!LHSAA.isValidState()) 7577 return indicatePessimisticFixpoint(); 7578 7579 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS)); 7580 if (!RHSAA.isValidState()) 7581 return indicatePessimisticFixpoint(); 7582 7583 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) 7584 // select i1 *, undef , undef => undef 7585 unionAssumedWithUndef(); 7586 else { 7587 unionAssumed(LHSAA); 7588 unionAssumed(RHSAA); 7589 } 7590 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 7591 : ChangeStatus::CHANGED; 7592 } 7593 7594 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 7595 auto AssumedBefore = getAssumed(); 7596 if (!CI->isIntegerCast()) 7597 return indicatePessimisticFixpoint(); 7598 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 7599 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 7600 Value *Src = CI->getOperand(0); 7601 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src)); 7602 if (!SrcAA.isValidState()) 7603 return indicatePessimisticFixpoint(); 7604 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 7605 if (SrcAA.undefIsContained()) 7606 unionAssumedWithUndef(); 7607 else { 7608 for (const APInt &S : SrcAAPVS) { 7609 APInt T = calculateCastInst(CI, S, ResultBitWidth); 7610 unionAssumed(T); 7611 } 7612 } 7613 return AssumedBefore == getAssumed() ? 
ChangeStatus::UNCHANGED 7614 : ChangeStatus::CHANGED; 7615 } 7616 7617 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 7618 auto AssumedBefore = getAssumed(); 7619 Value *LHS = BinOp->getOperand(0); 7620 Value *RHS = BinOp->getOperand(1); 7621 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 7622 return indicatePessimisticFixpoint(); 7623 7624 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS)); 7625 if (!LHSAA.isValidState()) 7626 return indicatePessimisticFixpoint(); 7627 7628 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS)); 7629 if (!RHSAA.isValidState()) 7630 return indicatePessimisticFixpoint(); 7631 7632 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 7633 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 7634 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 7635 7636 // TODO: make use of undef flag to limit potential values aggressively. 7637 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 7638 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 7639 return indicatePessimisticFixpoint(); 7640 } else if (LHSAA.undefIsContained()) { 7641 for (const APInt &R : RHSAAPVS) { 7642 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 7643 return indicatePessimisticFixpoint(); 7644 } 7645 } else if (RHSAA.undefIsContained()) { 7646 for (const APInt &L : LHSAAPVS) { 7647 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 7648 return indicatePessimisticFixpoint(); 7649 } 7650 } else { 7651 for (const APInt &L : LHSAAPVS) { 7652 for (const APInt &R : RHSAAPVS) { 7653 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 7654 return indicatePessimisticFixpoint(); 7655 } 7656 } 7657 } 7658 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 7659 : ChangeStatus::CHANGED; 7660 } 7661 7662 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 7663 auto AssumedBefore = getAssumed(); 7664 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 7665 Value *IncomingValue = PHI->getIncomingValue(u); 7666 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 7667 *this, IRPosition::value(*IncomingValue)); 7668 if (!PotentialValuesAA.isValidState()) 7669 return indicatePessimisticFixpoint(); 7670 if (PotentialValuesAA.undefIsContained()) 7671 unionAssumedWithUndef(); 7672 else 7673 unionAssumed(PotentialValuesAA.getAssumed()); 7674 } 7675 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 7676 : ChangeStatus::CHANGED; 7677 } 7678 7679 /// See AbstractAttribute::updateImpl(...). 
7680 ChangeStatus updateImpl(Attributor &A) override { 7681 Value &V = getAssociatedValue(); 7682 Instruction *I = dyn_cast<Instruction>(&V); 7683 7684 if (auto *ICI = dyn_cast<ICmpInst>(I)) 7685 return updateWithICmpInst(A, ICI); 7686 7687 if (auto *SI = dyn_cast<SelectInst>(I)) 7688 return updateWithSelectInst(A, SI); 7689 7690 if (auto *CI = dyn_cast<CastInst>(I)) 7691 return updateWithCastInst(A, CI); 7692 7693 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 7694 return updateWithBinaryOperator(A, BinOp); 7695 7696 if (auto *PHI = dyn_cast<PHINode>(I)) 7697 return updateWithPHINode(A, PHI); 7698 7699 return indicatePessimisticFixpoint(); 7700 } 7701 7702 /// See AbstractAttribute::trackStatistics() 7703 void trackStatistics() const override { 7704 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 7705 } 7706 }; 7707 7708 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 7709 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 7710 : AAPotentialValuesImpl(IRP, A) {} 7711 7712 /// See AbstractAttribute::initialize(...). 7713 ChangeStatus updateImpl(Attributor &A) override { 7714 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 7715 "not be called"); 7716 } 7717 7718 /// See AbstractAttribute::trackStatistics() 7719 void trackStatistics() const override { 7720 STATS_DECLTRACK_FN_ATTR(potential_values) 7721 } 7722 }; 7723 7724 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 7725 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 7726 : AAPotentialValuesFunction(IRP, A) {} 7727 7728 /// See AbstractAttribute::trackStatistics() 7729 void trackStatistics() const override { 7730 STATS_DECLTRACK_CS_ATTR(potential_values) 7731 } 7732 }; 7733 7734 struct AAPotentialValuesCallSiteReturned 7735 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> { 7736 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 7737 : AACallSiteReturnedFromReturned<AAPotentialValues, 7738 AAPotentialValuesImpl>(IRP, A) {} 7739 7740 /// See AbstractAttribute::trackStatistics() 7741 void trackStatistics() const override { 7742 STATS_DECLTRACK_CSRET_ATTR(potential_values) 7743 } 7744 }; 7745 7746 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { 7747 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) 7748 : AAPotentialValuesFloating(IRP, A) {} 7749 7750 /// See AbstractAttribute::initialize(..). 7751 void initialize(Attributor &A) override { 7752 Value &V = getAssociatedValue(); 7753 7754 if (auto *C = dyn_cast<ConstantInt>(&V)) { 7755 unionAssumed(C->getValue()); 7756 indicateOptimisticFixpoint(); 7757 return; 7758 } 7759 7760 if (isa<UndefValue>(&V)) { 7761 unionAssumedWithUndef(); 7762 indicateOptimisticFixpoint(); 7763 return; 7764 } 7765 } 7766 7767 /// See AbstractAttribute::updateImpl(...). 7768 ChangeStatus updateImpl(Attributor &A) override { 7769 Value &V = getAssociatedValue(); 7770 auto AssumedBefore = getAssumed(); 7771 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V)); 7772 const auto &S = AA.getAssumed(); 7773 unionAssumed(S); 7774 return AssumedBefore == getAssumed() ? 
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    auto AssumedBefore = getAssumed();
    auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
    const auto &S = AA.getAssumed();
    unionAssumed(S);
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    if (Function *F = getAnchorScope())
      DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
          *F);
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, I, DT));
    bool TrackUse = false;
    // Track use for instructions which must produce undef or poison bits when
    // at least one operand contains such bits.
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // associated values with dead positions would be replaced with undef
    // values.
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value is
    // considered to be dead. We don't manifest noundef in such positions for
    // the same reason as above.
    auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
        *this, getIRPosition(), /* TrackDependence */ false);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

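  // Added commentary (not part of the original source): if initialize() did
  // not already reach a fixpoint, the uses of the associated value inside the
  // must-be-executed context are inspected through followUsesInMBEC (see
  // followUseInMBEC above). The updateImpl() below then combines the noundef
  // states of all underlying values found by genericValueTraversal, e.g. all
  // incoming values of a phi have to be assumed noundef for the phi itself to
  // remain assumed noundef.
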
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

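// Added commentary (not part of the original source): the ID members above
// are never read for their value; their addresses act as unique keys under
// which the Attributor registers and looks up abstract attributes of the
// corresponding class.
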
// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

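// Added commentary (not part of the original source): as an illustration,
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) expands to
// (roughly) the following factory, where the IRP_FUNCTION and IRP_CALL_SITE
// cases allocate AANoUnwindFunction and AANoUnwindCallSite respectively and
// every other position kind hits llvm_unreachable:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     ... // one llvm_unreachable case per rejected position kind
//     }
//     return *AA;
//   }
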
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
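
// Added commentary (not part of the original source): the createForPosition
// factories generated above are normally not called directly; they are
// typically reached through Attributor::getOrCreateAAFor<AAType>(IRPosition),
// which first looks up an existing abstract attribute for the position and
// only falls back to createForPosition when none has been registered yet.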