//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro STATS_DECLTRACK
// with a custom message. If there are multiple increment sites, STATS_DECL
// and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
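
// For illustration (derived from the macro definitions above), an invocation
// like STATS_DECLTRACK_ARG_ATTR(returned) expands, roughly, to:
//
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }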

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is
/// false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to
/// build getelementptr instructions that traverse the natural type of \p Ptr
/// if possible. If that fails, the remaining offset is adjusted byte-wise,
/// hence through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
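
// As an illustrative sketch (hypothetical IR, not from the original code):
// given
//   %struct.S = type { i32, [4 x i32] }
// a call with PtrElemTy = %struct.S and Offset = 8 would decompose the offset
// into natural GEP indices (4 bytes to skip the i32 field, then one i32
// element of the array) and emit something like
//   %p.0.1.1 = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1, i64 1
// with any byte remainder handled via the i8* GEP path above.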

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
/// If \p Intraprocedural is set to true only values valid in the scope of
/// \p CtxI will be visited and simplification into other scopes is prevented.
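///
/// As a hypothetical example of the look-through behavior: starting from %x in
///
///   %a = select i1 %c, i32* %p, i32* %q
///   %x = bitcast i32* %a to i8*
///
/// the traversal strips the pointer cast to reach %a and then, unless the
/// select condition can be simplified to a constant, enqueues both %p and %q,
/// so \p VisitValueCB is invoked for %p and %q rather than for %x itself.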
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool &UsedAssumedInformation,
    bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr,
    bool Intraprocedural = false) {

  struct LivenessInfo {
    const AAIsDead *LivenessAA = nullptr;
    bool AnyDead = false;
  };
  SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
  auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
    LivenessInfo &LI = LivenessAAs[&F];
    if (!LI.LivenessAA)
      LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
                                            DepClassTy::NONE);
    return LI;
  };

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues) {
      LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
                        << Iteration << "!\n");
      return false;
    }

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site
                  // operand, stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if (!Intraprocedural || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      bool UsedAssumedInformation = false;
      SmallSetVector<Value *, 4> PotentialCopies;
      if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies, QueryingAA,
                                         UsedAssumedInformation,
                                         /* OnlyExact */ true)) {
        // Values have to be dynamically unique or we lose the fact that a
        // single llvm::Value might represent two runtime values (e.g., stack
        // locations in different recursive calls).
        bool DynamicallyUnique =
            llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
              return AA::isDynamicallyUnique(A, QueryingAA, *PC);
            });
        if (DynamicallyUnique &&
            (!Intraprocedural || !CtxI ||
             llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
               return AA::isValidInScope(*PC, CtxI->getFunction());
             }))) {
          for (auto *PotentialCopy : PotentialCopies)
            Worklist.push_back({PotentialCopy, CtxI});
          continue;
        }
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
      LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
                        << *V << "!\n");
      return false;
    }
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  for (auto &It : LivenessAAs)
    if (It.second.AnyDead)
      A.recordDependence(*It.second.LivenessAA, QueryingAA,
                         DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     bool Intraprocedural) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          UsedAssumedInformation, true, 32, StripCB, Intraprocedural))
    return false;
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
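
/// As an illustrative sketch (hypothetical attribute, not part of this file):
/// a boolean attribute AAFoo with an implementation class AAFooImpl would
/// typically instantiate the helper above for its "returned" position as
///
///   struct AAFooReturned final
///       : AAReturnedFromReturnedValues<AAFoo, AAFooImpl> {
///     AAFooReturned(const IRPosition &IRP, Attributor &A)
///         : AAReturnedFromReturnedValues<AAFoo, AAFooImpl>(IRP, A) {}
///     void trackStatistics() const override {
///       STATS_DECLTRACK_FNRET_ATTR(foo)
///     }
///   };
///
/// so the state of the returned position becomes the join over the states of
/// all (potentially) returned values.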

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states is then merged into the overall state. Let ParentState_i be
  // the state that captures the known information for the i-th branch
  // instruction in the context. ChildStates are created for its successors
  // respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a) {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   } else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<AAPointerInfo::OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or the only
    // thread executing all instructions is the main thread, we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load; for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is, it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable
    // query to determine if we should look at reachability from the callee.
    // For certain pointers we know the lifetime and we do not have to step
    // into the callee to determine reachability as the pointer would be dead
    // in the callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the function containing the alloca is not recursive, the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning, which does
      // not work yet if we have threading effects or the access is
      // complicated.
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if
    // that succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }
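
  // As a worked illustration (not from the original source): if the callee
  // recorded a 4-byte access to its argument at offset 8, and the call site
  // passes a pointer that is itself at offset 16 into the underlying object
  // (CallArgOffset == 16), the access is re-binned in the caller at [24, 28).
  // If CallArgOffset is unknown, the access lands in the unknown
  // offset-and-size bin instead.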

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non-constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
                                          GEP->getSourceElementType(), Indices);
        Follow = true;
        return true;
      }
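      // Worked example for the GEP case above (illustrative, not from the
      // original source): with
      //   %struct.S = type { i32, i32 }
      //   %g = getelementptr %struct.S, %struct.S* %p, i64 0, i32 1
      // and a typical data layout, getIndexedOffsetInType yields 4, so if %p
      // is known to be at offset 8, %g is recorded at offset 12.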
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the
      // value might change while we iterate through a loop. For now, we give
      // up if the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses.
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "     --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << "       - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments, as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
} // namespace

/// -----------------------NoUnwind Function Attribute--------------------------

namespace {
struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
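    // Until then, deduction here simply mirrors the callee: the clamp below
    // intersects our assumed state with the callee's AANoUnwind state and
    // reports whether anything changed.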
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
} // namespace

/// --------------------- Function Return Values -------------------------------

namespace {
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
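  // For illustration (example not from this file): for a function body like
  //
  //   if (c) return %a; else return %b;
  //
  // ReturnedValues maps %a and %b to their respective `ret` instructions and
  // Pred is invoked once per mapping.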
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    assert(AA::isValidInScope(V, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[&V].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  bool UsedAssumedInformation = false;
  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I, UsedAssumedInformation, /* UseValueSimplify */ true,
        /* MaxValues */ 16,
        /* StripCB */ nullptr, /* Intraprocedural */ true);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
} // namespace

/// ------------------------ NoSync Function Attribute -------------------------

bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg.
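    // An illustrative example (not from this file):
    //
    //   %res = cmpxchg ptr %p, i32 %cmp, i32 %new monotonic monotonic
    //
    // Here both orderings are relaxed (monotonic), so the check below returns
    // false; any stronger success or failure ordering makes the instruction
    // non-relaxed.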
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

namespace {
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    return AA::isNoSyncInst(A, I, *this);
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // Non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
} // namespace

/// ------------------------ No-Free Attributes ----------------------------

namespace {
struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
} // namespace

/// ------------------------ NonNull Argument Attribute ------------------------
namespace {
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart and avoid looking through things we do
  // not like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
    return 0;

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
  if (Base && Base == &AssociatedValue) {
    int64_t DerefBytes = Loc->Size.getValue() + Offset;
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  // Corner case when the offset is 0.
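  // For example (illustrative): if the access goes through a cast or an
  // all-zero GEP of the associated value, the minimal-base lookup above may
  // fail while GetPointerBaseWithConstantOffset still recovers the base with
  // a constant offset of 0, which is handled below.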
  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
                                          /*AllowNonInbounds*/ true);
  if (Base && Base == &AssociatedValue && Offset == 0) {
    int64_t DerefBytes = Loc->Size.getValue();
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  return 0;
}

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
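        // The `^=` below joins the visited value's state into the running
        // state T; if any contributing value may be null, T becomes an
        // invalid state and the traversal is aborted via isValidState().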
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
} // namespace

/// ------------------------ No-Recurse Attributes ----------------------------

namespace {
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
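    // Intuition: any recursive cycle through this function would also make
    // the callers on that cycle recursive. Hence, if every (live) call site
    // sits in a function known not to recurse, there can be no cycle through
    // this function either.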
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          DepClassTy::NONE);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(CallSitePred, *this, true,
                               UsedAssumedInformation)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    const AAFunctionReachability &EdgeReachability =
        A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
                                           DepClassTy::REQUIRED);
    if (EdgeReachability.canReach(A, *getAnchorScope()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
} // namespace

/// -------------------- Undefined-Behavior Attributes ------------------------

namespace {
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef now states that volatile stores are not UB, so skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should give us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violations for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the
        // nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (We can replace the
        //       value with undef.)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where it is known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      Optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
        return true;

      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
      //       We also ensure the return position is not "assumed dead"
      //       because the returned value was then potentially simplified to
      //       `undef` in AAReturnedValues without removing the `noundef`
      //       attribute yet.

      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
      //   (1) The returned value is known to be undef.
      //   (2) The value is known to be a null pointer and the returned
      //       position has the nonnull attribute (because the returned value
      //       is poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        auto &NonNullAA = A.getAAFor<AANonNull>(
            *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
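    // An illustrative example (not from this file) of what is flagged here:
    //
    //   define noundef i32 @f() {
    //     ret i32 undef
    //   }
    //
    // The returned position is noundef but the only returned value is undef,
    // so the `ret` is known UB.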
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words: if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest is boilerplate to
    // ensure that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point. "Change" here
  /// means that their size changes. The size of each set is monotonically
  /// increasing (we only add items to them) and it is upper bounded by the
  /// number of instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point, they
  /// will stop increasing. Consequently, at some point, both sets will have
  /// stopped changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during updates in which we process an instruction \p I
  // that depends on a value \p V; one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
} // namespace

/// ------------------------ Will-Return Attributes ----------------------------

namespace {
// Helper function that checks whether a function has any cycle for which we
// do not know whether it is bounded. Loops with a maximum trip count are
// considered bounded; any other cycle is not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // assume any cycle to be unbounded. We use scc_iterator, which uses
  // Tarjan's algorithm, to find all the maximal SCCs. To detect if there's a
  // cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
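  // For example (illustrative): `for (i = 0; i < 100; ++i)` has a constant
  // maximum trip count, while `while (n != 1) n = f(n);` typically does not,
  // so the latter keeps the containing function from being willreturn.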
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function, which
    // might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    bool IsKnown;
    if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
      return IsKnown || !KnownOnly;
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);

    Function *F = getAnchorScope();
    if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};
} // namespace

/// -------------------AAReachability Attribute--------------------------

namespace {
struct AAReachabilityImpl : AAReachability {
  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
      : AAReachability(IRP, A) {}

  const std::string getAsStr() const override {
    // TODO: Return the number of reachable queries.
    return "reachable";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
    if (!NoRecurseAA.isAssumedNoRecurse())
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAReachabilityImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};
} // namespace

/// ------------------------ NoAlias Argument Attribute ------------------------

namespace {
struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
    assert(getAssociatedType()->isPointerTy() &&
           "Noalias is a pointer attribute");
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // See callsite argument attribute and callee argument attribute.
    if (hasAttr({Attribute::ByVal}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // We have to make sure no-alias on the argument does not break
    // synchronization when this is a callback argument, see also [1] below.
    // If synchronization cannot be affected, we delegate to the base
    // updateImpl function, otherwise we give up for now.

    // If the function is no-sync, no-alias cannot break synchronization.
    const auto &NoSyncAA =
        A.getAAFor<AANoSync>(*this,
                             IRPosition::function_scope(getIRPosition()),
                             DepClassTy::OPTIONAL);
    if (NoSyncAA.isAssumedNoSync())
      return Base::updateImpl(A);

    // If the argument is read-only, no-alias cannot break synchronization.
    bool IsKnown;
    if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
      return Base::updateImpl(A);

    // If the argument is never passed through callbacks, no-alias cannot
    // break synchronization.
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(
            [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
            true, UsedAssumedInformation))
      return Base::updateImpl(A);

    // TODO: add no-alias but make sure it doesn't break synchronization by
    //       introducing fake uses. See:
    // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
    //     International Workshop on OpenMP 2018,
    //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i)   Associated value is assumed to be noalias in the definition.
    // (ii)  Associated value is assumed to be no-capture in all the uses
    //       possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.
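    //
    // For illustration, a hypothetical IR sketch in which all three
    // conditions would hold for %p at the call site of @g (assuming @g does
    // not capture its first argument): the malloc result is noalias at its
    // definition, it is not captured before the call, and no other pointer
    // argument may alias it.
    //
    //   %p = call noalias i8* @malloc(i64 4)
    //   call void @g(i8* nocapture %p, i8* @some_global)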

    bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
    if (!AssociatedValueIsNoAliasAtDef) {
      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
                        << " is not no-alias at the definition\n");
      return false;
    }

    A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const Function *ScopeFn = VIRP.getAnchorScope();
    auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // callsite.
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());

      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI we allow the use.
      // TODO: We should inspect the operands and allow those that cannot
      //       alias with the value.
      if (UserI == getCtxI() && UserI->getNumOperands() == 1)
        return true;

      if (ScopeFn) {
        const auto &ReachabilityAA = A.getAAFor<AAReachability>(
            *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);

        if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
          return true;

        if (auto *CB = dyn_cast<CallBase>(UserI)) {
          if (CB->isArgOperand(&U)) {

            unsigned ArgNo = CB->getArgOperandNo(&U);

            const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
                *this, IRPosition::callsite_argument(*CB, ArgNo),
                DepClassTy::OPTIONAL);

            if (NoCaptureAA.isAssumedNoCapture())
              return true;
          }
        }
      }

      // Follow cases which can potentially have more users.
      if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
          isa<SelectInst>(U)) {
        Follow = true;
        return true;
      }

      LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
      return false;
    };

    if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
        LLVM_DEBUG(
            dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
                   << " cannot be noalias as it is potentially captured\n");
        return false;
      }
    }
    A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);

    // Check there is no other pointer argument which could alias with the
    // value passed at this call site.
    // TODO: AbstractCallSite
    const auto &CB = cast<CallBase>(getAnchorValue());
    for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
      if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
        return false;

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If the argument is readnone we are done as there are no accesses via
    // the argument.
    auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const auto &NoAliasAA =
        A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);

    AAResults *AAR = nullptr;
    if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
                                               NoAliasAA)) {
      LLVM_DEBUG(
          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
      return ChangeStatus::UNCHANGED;
    }

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};

/// NoAlias attribute for a function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      /// For now, we can only deduce noalias if we have call sites.
      /// FIXME: add more support.
      if (!isa<CallBase>(&RV))
        return false;

      const IRPosition &RVPos = IRPosition::value(RV);
      const auto &NoAliasAA =
          A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      const auto &NoCaptureAA =
          A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
      return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};

/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};
} // namespace

/// -------------------AAIsDead Function Attribute-----------------------

namespace {
struct AAIsDeadValueImpl : public AAIsDead {
  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return isKnown(IS_DEAD); }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    return I == getCtxI() && isAssumedDead();
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return isAssumedDead(I) && isKnownDead();
  }

  /// See AbstractAttribute::getAsStr().
  virtual const std::string getAsStr() const override {
    return isAssumedDead() ? "assumed-dead" : "assumed-live";
  }

  /// Check if all uses are assumed dead.
  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    // Callers might not check the type; void has no uses.
    if (V.getType()->isVoidTy())
      return true;

    // If we replace a value with a constant there are no uses left afterwards.
    if (!isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(V, *this, UsedAssumedInformation);
      if (!C.hasValue() || *C)
        return true;
    }

    auto UsePred = [&](const Use &U, bool &Follow) { return false; };
    // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one
    // is, without going through N update cycles. This is not required for
    // correctness.
    return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
                             DepClassTy::REQUIRED);
  }

  /// Determine if \p I is assumed to be side-effect free.
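  ///
  /// For illustration, a readonly + nounwind call such as the hypothetical
  ///   %x = call i32 @pure(i32 %a)    ; readonly, nounwind
  /// would be considered side-effect free here, while a plain store or an
  /// intrinsic call would not be.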
  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
    if (!I || wouldInstructionBeTriviallyDead(I))
      return true;

    auto *CB = dyn_cast<CallBase>(I);
    if (!CB || isa<IntrinsicInst>(CB))
      return false;

    const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
    const auto &NoUnwindAA =
        A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
    if (!NoUnwindAA.isAssumedNoUnwind())
      return false;
    if (!NoUnwindAA.isKnownNoUnwind())
      A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);

    bool IsKnown;
    return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
  }
};

struct AAIsDeadFloating : public AAIsDeadValueImpl {
  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (!isAssumedSideEffectFree(A, I)) {
      if (!isa_and_nonnull<StoreInst>(I))
        indicatePessimisticFixpoint();
      else
        removeAssumedBits(HAS_NO_EFFECT);
    }
  }

  bool isDeadStore(Attributor &A, StoreInst &SI) {
    // The LangRef now states a volatile store is not UB/dead; skip them.
    if (SI.isVolatile())
      return false;

    bool UsedAssumedInformation = false;
    SmallSetVector<Value *, 4> PotentialCopies;
    if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
                                             UsedAssumedInformation))
      return false;
    return llvm::all_of(PotentialCopies, [&](Value *V) {
      return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
                             UsedAssumedInformation);
    });
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (isa_and_nonnull<StoreInst>(I))
      if (isValidState())
        return "assumed-dead-store";
    return AAIsDeadValueImpl::getAsStr();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
      if (!isDeadStore(A, *SI))
        return indicatePessimisticFixpoint();
    } else {
      if (!isAssumedSideEffectFree(A, I))
        return indicatePessimisticFixpoint();
      if (!areAllUsesAssumedDead(A, getAssociatedValue()))
        return indicatePessimisticFixpoint();
    }
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // isAssumedSideEffectFree again because only the users might be dead
      // while the instruction (e.g., a call) is still needed.
      if (isa<StoreInst>(I) ||
          (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
        A.deleteAfterManifest(*I);
        return ChangeStatus::CHANGED;
      }
    }
    if (V.use_empty())
      return ChangeStatus::UNCHANGED;

    bool UsedAssumedInformation = false;
    Optional<Constant *> C =
        A.getAssumedConstant(V, *this, UsedAssumedInformation);
    if (C.hasValue() && C.getValue())
      return ChangeStatus::UNCHANGED;

    // Replace the value with undef as it is dead, but keep droppable uses
    // around as they provide information we don't want to give up on just yet.
    UndefValue &UV = *UndefValue::get(V.getType());
    bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(IsDead)
  }
};

struct AAIsDeadArgument : public AAIsDeadFloating {
  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (!A.isFunctionIPOAmendable(*getAnchorScope()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = AAIsDeadFloating::manifest(A);
    Argument &Arg = *getAssociatedArgument();
    if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
      if (A.registerFunctionSignatureRewrite(
              Arg, /* ReplacementTypes */ {},
              Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
              Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
        Arg.dropDroppableUses();
        return ChangeStatus::CHANGED;
      }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    CallBase &CB = cast<CallBase>(getAnchorValue());
    Use &U = CB.getArgOperandUse(getCallSiteArgNo());
    assert(!isa<UndefValue>(U.get()) &&
           "Expected undef values to be filtered out!");
    UndefValue &UV = *UndefValue::get(U->getType());
    if (A.changeUseAfterManifest(U, UV))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override {
    return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    // We track this separately as a secondary state.
    IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
      IsAssumedSideEffectFree = false;
      Changed = ChangeStatus::CHANGED;
    }
    if (!areAllUsesAssumedDead(A, getAssociatedValue()))
      return indicatePessimisticFixpoint();
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (IsAssumedSideEffectFree)
      STATS_DECLTRACK_CSRET_ATTR(IsDead)
    else
      STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead()
               ? "assumed-dead"
               : (getAssumed() ? "assumed-dead-users" : "assumed-live");
  }

private:
  bool IsAssumedSideEffectFree = true;
};

struct AAIsDeadReturned : public AAIsDeadValueImpl {
  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions([](Instruction &) { return true; }, *this,
                              {Instruction::Ret}, UsedAssumedInformation);

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isCallbackCall() || !ACS.getInstruction())
        return false;
      return areAllUsesAssumedDead(A, *ACS.getInstruction());
    };

    if (!A.checkForAllCallSites(PredForCallSite, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: Rewrite the signature to return void?
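    // For illustration (hypothetical IR): if no caller ever uses the result of
    //   %r = call i32 @f()
    // then every `ret i32 %v` in @f can be rewritten to `ret i32 undef`,
    // which in turn may render the computation of %v dead as well.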
    bool AnyChange = false;
    UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
    auto RetInstPred = [&](Instruction &I) {
      ReturnInst &RI = cast<ReturnInst>(I);
      if (!isa<UndefValue>(RI.getReturnValue()))
        AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
      return true;
    };
    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
                              UsedAssumedInformation);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
};

struct AAIsDeadFunction : public AAIsDead {
  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    if (F && !F->isDeclaration()) {
      // We only want to compute liveness once. If the function is not part of
      // the SCC, skip it.
      if (A.isRunOn(*const_cast<Function *>(F))) {
        ToBeExploredFrom.insert(&F->getEntryBlock().front());
        assumeLive(A, F->getEntryBlock());
      } else {
        indicatePessimisticFixpoint();
      }
    }
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAnchorScope()->size()) + "][#TBEP " +
           std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
           std::to_string(KnownDeadEnds.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAnchorScope();

    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB)) {
        A.deleteAfterManifest(BB);
        ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
        HasChanged = ChangeStatus::CHANGED;
      }

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
    assert(From->getParent() == getAnchorScope() &&
           To->getParent() == getAnchorScope() &&
           "Used AAIsDead of the wrong function");
    return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// Returns true if the function is assumed dead.
  bool isAssumedDead() const override { return false; }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return false; }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAnchorScope() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls
    // to internal functions we will assume that those are now live as well.
    // This is a performance optimization for blocks with calls to a lot of
    // internal functions. It can however cause dead functions to be treated
    // as live.
    for (const Instruction &I : BB)
      if (const auto *CB = dyn_cast<CallBase>(&I))
        if (const Function *F = CB->getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live edges.
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};

static bool
identifyAliveSuccessors(Attributor &A, const CallBase &CB,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  const IRPosition &IPos = IRPosition::callsite_function(CB);

  const auto &NoReturnAA =
      A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
  if (NoReturnAA.isAssumedNoReturn())
    return !NoReturnAA.isKnownNoReturn();
  if (CB.isTerminator())
    AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
  else
    AliveSuccessors.push_back(CB.getNextNode());
  return false;
}

static bool
identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation =
      identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);

  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
    AliveSuccessors.push_back(&II.getUnwindDest()->front());
  } else {
    const IRPosition &IPos = IRPosition::callsite_function(II);
    const auto &AANoUnw =
        A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
    if (AANoUnw.isAssumedNoUnwind()) {
      UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
    } else {
      AliveSuccessors.push_back(&II.getUnwindDest()->front());
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  if (BI.getNumSuccessors() == 1) {
    AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
  } else {
    Optional<Constant *> C =
        A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
    if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
      // No value yet, assume both edges are dead.
    } else if (isa_and_nonnull<ConstantInt>(*C)) {
      const BasicBlock *SuccBB =
          BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
      AliveSuccessors.push_back(&SuccBB->front());
    } else {
      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
      AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
      UsedAssumedInformation = false;
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  Optional<Constant *> C =
      A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
    // No value yet, assume all edges are dead.
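    // For illustration (hypothetical IR): while the condition of
    //   switch i32 %c, label %otherwise [ i32 0, label %bb0
    //                                     i32 1, label %bb1 ]
    // has no simplified value yet, no successor is marked alive; once %c
    // simplifies to, e.g., the constant 1, only %bb1 becomes alive below.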
  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
    for (auto &CaseIt : SI.cases()) {
      if (CaseIt.getCaseValue() == C.getValue()) {
        AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
        return UsedAssumedInformation;
      }
    }
    AliveSuccessors.push_back(&SI.getDefaultDest()->front());
    return UsedAssumedInformation;
  } else {
    for (const BasicBlock *SuccBB : successors(SI.getParent()))
      AliveSuccessors.push_back(&SuccBB->front());
  }
  return UsedAssumedInformation;
}

ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAnchorScope()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Fast forward for uninteresting instructions. We could look for UB here
    // though.
    while (!I->isTerminator() && !isa<CallBase>(I))
      I = I->getNextNode();

    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
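    // Only terminators and call-like instructions reach this switch; e.g.,
    // a call to an (assumed) noreturn function yields no alive successors
    // and is recorded as a known dead end below.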
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else if (AliveSuccessors.empty() ||
               (I->isTerminator() &&
                AliveSuccessors.size() < I->getNumSuccessors())) {
      if (KnownDeadEnds.insert(I))
        Change = ChangeStatus::CHANGED;
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Record the assumed live edge.
        auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
        if (AssumedLiveEdges.insert(Edge).second)
          Change = ChangeStatus::CHANGED;
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  // Check if the content of ToBeExploredFrom changed, ignoring the order.
  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
      llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
        return !ToBeExploredFrom.count(I);
      })) {
    Change = ChangeStatus::CHANGED;
    ToBeExploredFrom = std::move(NewToBeExploredFrom);
  }

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
} // namespace

/// -------------------- Dereferenceable Argument Attribute --------------------

namespace {
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
      : AADereferenceable(IRP, A) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs, /* IgnoreSubsumingPositions */ false, &A);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    const IRPosition &IRP = this->getIRPosition();
    NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);

    bool CanBeNull, CanBeFreed;
    takeKnownDerefBytesMaximum(
        IRP.getAssociatedValue().getPointerDereferenceableBytes(
            A.getDataLayout(), CanBeNull, CanBeFreed));

    bool IsFnInterface = IRP.isFnInterfaceKind();
    Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in the
  /// must-be-executed-context.
  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
                              DerefState &State) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
    if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
      return;

    int64_t Offset;
    const Value *Base = GetPointerBaseWithConstantOffset(
        Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
    if (Base && Base == &getAssociatedValue())
      State.addAccessedBytes(Offset, Loc->Size.getValue());
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AADereferenceable::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
                      << " for instruction " << *I << "\n");

    addAccessedBytesForUse(A, U, I, State);
    State.takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
                            bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base = stripAndAccumulateOffsets(
          A, *this, &V, DL, Offset, /* GetMinOffset */ false,
          /* AllowNonInbounds */ true);

      const auto &AA = A.getAAFor<AADereferenceable>(
          *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull, CanBeFreed;
        DerefBytes =
            Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = AA.getState();
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to
      // negative indices as we first have to come up with code to deal with
      // loops and with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
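          // For illustration (hypothetical values): from a base pointer that
          // is dereferenceable(8) and an accumulated constant offset of 3,
          // the position itself is only dereferenceable(5), i.e.,
          // max(0, 8 - 3), as computed above.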
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
                                           VisitValueCB, getCtxI(),
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
            IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable,
                                      AADereferenceableImpl> {
  using Base =
      AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AADereferenceableFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final
    : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};
} // namespace

// ------------------------ Align Argument Attribute ------------------------

namespace {
static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
                                    Value &AssociatedValue, const Use *U,
                                    const Instruction *I, bool &TrackUse) {
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into.
  if (isa<CastInst>(I)) {
    // Follow all but ptr2int casts.
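    // E.g., bitcasts and addrspacecasts preserve the underlying address and
    // are followed, while a hypothetical `%i = ptrtoint i8* %p to i64`
    // escapes into integer arithmetic and is not.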
    TrackUse = !isa<PtrToIntInst>(I);
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (GEP->hasAllConstantIndices())
      TrackUse = true;
    return 0;
  }

  MaybeAlign MA;
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U) || CB->isCallee(U))
      return 0;

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
    MA = MaybeAlign(AlignAA.getKnownAlign());
  }

  const DataLayout &DL = A.getDataLayout();
  const Value *UseV = U->get();
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (SI->getPointerOperand() == UseV)
      MA = SI->getAlign();
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (LI->getPointerOperand() == UseV)
      MA = LI->getAlign();
  }

  if (!MA || *MA <= QueryingAA.getKnownAlign())
    return 0;

  unsigned Alignment = MA->value();
  int64_t Offset;

  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // BasePointerAddr + Offset = Alignment * Q for some integer Q.
      // So we can say that the maximum power of two which is a divisor of
      // gcd(Offset, Alignment) is an alignment.

      uint32_t gcd =
          greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
      Alignment = llvm::PowerOf2Floor(gcd);
    }
  }

  return Alignment;
}

struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    Value &V = getAssociatedValue();
    takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());

    if (getIRPosition().isFnInterfaceKind() &&
        (!getAnchorScope() ||
         !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AssociatedValue = getAssociatedValue();
    for (const Use &U : AssociatedValue.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AssociatedValue)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(Align(getAssumedAlign()));
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AssociatedValue)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       that value in the existing manifest method and a new one for
  //       AAAlignImpl, to avoid making the alignment explicit if it did not
  //       improve anything.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AAAlign::StateType &State) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    State.takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};

/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, const Instruction *,
                            AAAlign::StateType &T, bool Stripped) -> bool {
      if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
        return true;
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
                                           DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        int64_t Offset;
        unsigned Alignment = 1;
        if (const Value *Base =
                GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
          // TODO: Use AAAlign for the base too.
          Align PA = Base->getPointerAlignment(DL);
          // BasePointerAddr + Offset = Alignment * Q for some integer Q.
          // So we can say that the maximum power of two which is a divisor of
          // gcd(Offset, Alignment) is an alignment.
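          // For illustration (hypothetical numbers): with a base alignment of
          // 16 and a constant offset of 20, gcd(20, 16) = 4, so the derived
          // pointer is known to be at least 4-byte aligned
          // (PowerOf2Floor(4) == 4).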
4629
4630 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4631 uint32_t(PA.value()));
4632 Alignment = llvm::PowerOf2Floor(gcd);
4633 } else {
4634 Alignment = V.getPointerAlignment(DL).value();
4635 }
4636 // Use only IR information if we did not strip anything.
4637 T.takeKnownMaximum(Alignment);
4638 T.indicatePessimisticFixpoint();
4639 } else {
4640 // Use abstract attribute information.
4641 const AAAlign::StateType &DS = AA.getState();
4642 T ^= DS;
4643 }
4644 return T.isValidState();
4645 };
4646
4647 StateType T;
4648 bool UsedAssumedInformation = false;
4649 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4650 VisitValueCB, getCtxI(),
4651 UsedAssumedInformation))
4652 return indicatePessimisticFixpoint();
4653
4654 // TODO: If we know we visited all incoming values, and thus none are
4655 // assumed dead, we can take the known information from the state T.
4656 return clampStateAndIndicateChange(getState(), T);
4657 }
4658
4659 /// See AbstractAttribute::trackStatistics()
4660 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4661 };
4662
4663 /// Align attribute for function return value.
4664 struct AAAlignReturned final
4665 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4666 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4667 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4668
4669 /// See AbstractAttribute::initialize(...).
4670 void initialize(Attributor &A) override {
4671 Base::initialize(A);
4672 Function *F = getAssociatedFunction();
4673 if (!F || F->isDeclaration())
4674 indicatePessimisticFixpoint();
4675 }
4676
4677 /// See AbstractAttribute::trackStatistics()
4678 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4679 };
4680
4681 /// Align attribute for function argument.
4682 struct AAAlignArgument final
4683 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4684 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4685 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4686
4687 /// See AbstractAttribute::manifest(...).
4688 ChangeStatus manifest(Attributor &A) override {
4689 // If the associated argument is involved in a must-tail call we give up
4690 // because we would need to keep the argument alignments of caller and
4691 // callee in-sync. It just does not seem worth the trouble right now.
4692 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4693 return ChangeStatus::UNCHANGED;
4694 return Base::manifest(A);
4695 }
4696
4697 /// See AbstractAttribute::trackStatistics()
4698 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4699 };
4700
4701 struct AAAlignCallSiteArgument final : AAAlignFloating {
4702 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4703 : AAAlignFloating(IRP, A) {}
4704
4705 /// See AbstractAttribute::manifest(...).
4706 ChangeStatus manifest(Attributor &A) override {
4707 // If the associated argument is involved in a must-tail call we give up
4708 // because we would need to keep the argument alignments of caller and
4709 // callee in-sync. It just does not seem worth the trouble right now.
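// Illustrative sketch (assumption, not from the source): given
//   %r = musttail call i8* @callee(i8* align 8 %p)
// raising the alignment on the callee argument without also updating the
// call site (or vice versa) would let the two positions disagree, so we
// conservatively refuse to manifest for must-tail arguments.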
4710 if (Argument *Arg = getAssociatedArgument())
4711 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4712 return ChangeStatus::UNCHANGED;
4713 ChangeStatus Changed = AAAlignImpl::manifest(A);
4714 Align InheritAlign =
4715 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4716 if (InheritAlign >= getAssumedAlign())
4717 Changed = ChangeStatus::UNCHANGED;
4718 return Changed;
4719 }
4720
4721 /// See AbstractAttribute::updateImpl(Attributor &A).
4722 ChangeStatus updateImpl(Attributor &A) override {
4723 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4724 if (Argument *Arg = getAssociatedArgument()) {
4725 // We only take known information from the argument
4726 // so we do not need to track a dependence.
4727 const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4728 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4729 takeKnownMaximum(ArgAlignAA.getKnownAlign());
4730 }
4731 return Changed;
4732 }
4733
4734 /// See AbstractAttribute::trackStatistics()
4735 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4736 };
4737
4738 /// Align attribute deduction for a call site return value.
4739 struct AAAlignCallSiteReturned final
4740 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4741 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4742 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4743 : Base(IRP, A) {}
4744
4745 /// See AbstractAttribute::initialize(...).
4746 void initialize(Attributor &A) override {
4747 Base::initialize(A);
4748 Function *F = getAssociatedFunction();
4749 if (!F || F->isDeclaration())
4750 indicatePessimisticFixpoint();
4751 }
4752
4753 /// See AbstractAttribute::trackStatistics()
4754 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4755 };
4756 } // namespace
4757
4758 /// ------------------ Function No-Return Attribute ----------------------------
4759 namespace {
4760 struct AANoReturnImpl : public AANoReturn {
4761 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4762
4763 /// See AbstractAttribute::initialize(...).
4764 void initialize(Attributor &A) override {
4765 AANoReturn::initialize(A);
4766 Function *F = getAssociatedFunction();
4767 if (!F || F->isDeclaration())
4768 indicatePessimisticFixpoint();
4769 }
4770
4771 /// See AbstractAttribute::getAsStr().
4772 const std::string getAsStr() const override {
4773 return getAssumed() ? "noreturn" : "may-return";
4774 }
4775
4776 /// See AbstractAttribute::updateImpl(Attributor &A).
4777 virtual ChangeStatus updateImpl(Attributor &A) override {
4778 auto CheckForNoReturn = [](Instruction &) { return false; };
4779 bool UsedAssumedInformation = false;
4780 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4781 {(unsigned)Instruction::Ret},
4782 UsedAssumedInformation))
4783 return indicatePessimisticFixpoint();
4784 return ChangeStatus::UNCHANGED;
4785 }
4786 };
4787
4788 struct AANoReturnFunction final : AANoReturnImpl {
4789 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4790 : AANoReturnImpl(IRP, A) {}
4791
4792 /// See AbstractAttribute::trackStatistics()
4793 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4794 };
4795
4796 /// NoReturn attribute deduction for call sites.
4797 struct AANoReturnCallSite final : AANoReturnImpl {
4798 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4799 : AANoReturnImpl(IRP, A) {}
4800
4801 /// See AbstractAttribute::initialize(...).
4802 void initialize(Attributor &A) override {
4803 AANoReturnImpl::initialize(A);
4804 if (Function *F = getAssociatedFunction()) {
4805 const IRPosition &FnPos = IRPosition::function(*F);
4806 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4807 if (!FnAA.isAssumedNoReturn())
4808 indicatePessimisticFixpoint();
4809 }
4810 }
4811
4812 /// See AbstractAttribute::updateImpl(...).
4813 ChangeStatus updateImpl(Attributor &A) override {
4814 // TODO: Once we have call site specific value information we can provide
4815 // call site specific liveness information and then it makes
4816 // sense to specialize attributes for call site arguments instead of
4817 // redirecting requests to the callee argument.
4818 Function *F = getAssociatedFunction();
4819 const IRPosition &FnPos = IRPosition::function(*F);
4820 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4821 return clampStateAndIndicateChange(getState(), FnAA.getState());
4822 }
4823
4824 /// See AbstractAttribute::trackStatistics()
4825 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4826 };
4827 } // namespace
4828
4829 /// ----------------------- Variable Capturing ---------------------------------
4830
4831 namespace {
4832 /// A class to hold the state for no-capture attributes.
4833 struct AANoCaptureImpl : public AANoCapture {
4834 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4835
4836 /// See AbstractAttribute::initialize(...).
4837 void initialize(Attributor &A) override {
4838 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4839 indicateOptimisticFixpoint();
4840 return;
4841 }
4842 Function *AnchorScope = getAnchorScope();
4843 if (isFnInterfaceKind() &&
4844 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4845 indicatePessimisticFixpoint();
4846 return;
4847 }
4848
4849 // You cannot "capture" null in the default address space.
4850 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4851 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4852 indicateOptimisticFixpoint();
4853 return;
4854 }
4855
4856 const Function *F =
4857 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4858
4859 // Check what state the associated function can actually capture.
4860 if (F)
4861 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4862 else
4863 indicatePessimisticFixpoint();
4864 }
4865
4866 /// See AbstractAttribute::updateImpl(...).
4867 ChangeStatus updateImpl(Attributor &A) override;
4868
4869 /// See AbstractAttribute::getDeducedAttributes(...).
4870 virtual void
4871 getDeducedAttributes(LLVMContext &Ctx,
4872 SmallVectorImpl<Attribute> &Attrs) const override {
4873 if (!isAssumedNoCaptureMaybeReturned())
4874 return;
4875
4876 if (isArgumentPosition()) {
4877 if (isAssumedNoCapture())
4878 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4879 else if (ManifestInternal)
4880 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4881 }
4882 }
4883
4884 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4885 /// depending on the ability of the function associated with \p IRP to capture
4886 /// state in memory and through "returning/throwing", respectively.
4887 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4888 const Function &F,
4889 BitIntegerState &State) {
4890 // TODO: Once we have memory behavior attributes we should use them here.
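// Illustrative example (not from the source): for
//   declare void @f(i8* %p) readonly nounwind
// @f can neither write %p to memory nor hand it back through a return
// value or an exception, so %p cannot be captured at all; the checks
// below establish exactly these capabilities bit by bit.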
4891
4892 // If we know we cannot communicate or write to memory, we do not care about
4893 // ptr2int anymore.
4894 if (F.onlyReadsMemory() && F.doesNotThrow() &&
4895 F.getReturnType()->isVoidTy()) {
4896 State.addKnownBits(NO_CAPTURE);
4897 return;
4898 }
4899
4900 // A function cannot capture state in memory if it only reads memory; it can,
4901 // however, return/throw state, and that state might be influenced by the
4902 // pointer value, e.g., loading from a returned pointer might reveal a bit.
4903 if (F.onlyReadsMemory())
4904 State.addKnownBits(NOT_CAPTURED_IN_MEM);
4905
4906 // A function cannot communicate state back if it does not throw
4907 // exceptions and does not return values.
4908 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4909 State.addKnownBits(NOT_CAPTURED_IN_RET);
4910
4911 // Check existing "returned" attributes.
4912 int ArgNo = IRP.getCalleeArgNo();
4913 if (F.doesNotThrow() && ArgNo >= 0) {
4914 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4915 if (F.hasParamAttribute(u, Attribute::Returned)) {
4916 if (u == unsigned(ArgNo))
4917 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4918 else if (F.onlyReadsMemory())
4919 State.addKnownBits(NO_CAPTURE);
4920 else
4921 State.addKnownBits(NOT_CAPTURED_IN_RET);
4922 break;
4923 }
4924 }
4925 }
4926
4927 /// See AbstractState::getAsStr().
4928 const std::string getAsStr() const override {
4929 if (isKnownNoCapture())
4930 return "known not-captured";
4931 if (isAssumedNoCapture())
4932 return "assumed not-captured";
4933 if (isKnownNoCaptureMaybeReturned())
4934 return "known not-captured-maybe-returned";
4935 if (isAssumedNoCaptureMaybeReturned())
4936 return "assumed not-captured-maybe-returned";
4937 return "assumed-captured";
4938 }
4939 };
4940
4941 /// Attributor-aware capture tracker.
4942 struct AACaptureUseTracker final : public CaptureTracker {
4943
4944 /// Create a capture tracker that can look up in-flight abstract attributes
4945 /// through the Attributor \p A.
4946 ///
4947 /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4948 /// search is stopped. If a use leads to a return instruction,
4949 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4950 /// If a use leads to a ptr2int which may capture the value,
4951 /// \p CapturedInInteger is set. If a use is found that is currently assumed
4952 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4953 /// set. All values in \p PotentialCopies are later tracked as well. For every
4954 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4955 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4956 /// conservatively set to true.
4957 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4958 const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4959 SmallSetVector<Value *, 4> &PotentialCopies,
4960 unsigned &RemainingUsesToExplore)
4961 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4962 PotentialCopies(PotentialCopies),
4963 RemainingUsesToExplore(RemainingUsesToExplore) {}
4964
4965 /// Determine if \p V may be captured. *Also updates the state!*
4966 bool valueMayBeCaptured(const Value *V) {
4967 if (V->getType()->isPointerTy()) {
4968 PointerMayBeCaptured(V, this);
4969 } else {
4970 State.indicatePessimisticFixpoint();
4971 }
4972 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4973 }
4974
4975 /// See CaptureTracker::tooManyUses().
4976 void tooManyUses() override {
4977 State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4978 }
4979
4980 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4981 if (CaptureTracker::isDereferenceableOrNull(O, DL))
4982 return true;
4983 const auto &DerefAA = A.getAAFor<AADereferenceable>(
4984 NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4985 return DerefAA.getAssumedDereferenceableBytes();
4986 }
4987
4988 /// See CaptureTracker::captured(...).
4989 bool captured(const Use *U) override {
4990 Instruction *UInst = cast<Instruction>(U->getUser());
4991 LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4992 << "\n");
4993
4994 // Because we may reuse the tracker multiple times we keep track of the
4995 // number of explored uses ourselves as well.
4996 if (RemainingUsesToExplore-- == 0) {
4997 LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4998 return isCapturedIn(/* Memory */ true, /* Integer */ true,
4999 /* Return */ true);
5000 }
5001
5002 // Deal with ptr2int by following uses.
5003 if (isa<PtrToIntInst>(UInst)) {
5004 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5005 return valueMayBeCaptured(UInst);
5006 }
5007
5008 // For stores we check if we can follow the value through memory or not.
5009 if (auto *SI = dyn_cast<StoreInst>(UInst)) {
5010 if (SI->isVolatile())
5011 return isCapturedIn(/* Memory */ true, /* Integer */ false,
5012 /* Return */ false);
5013 bool UsedAssumedInformation = false;
5014 if (!AA::getPotentialCopiesOfStoredValue(
5015 A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
5016 return isCapturedIn(/* Memory */ true, /* Integer */ false,
5017 /* Return */ false);
5018 // Not captured directly, potential copies will be checked.
5019 return isCapturedIn(/* Memory */ false, /* Integer */ false,
5020 /* Return */ false);
5021 }
5022
5023 // Explicitly catch return instructions.
5024 if (isa<ReturnInst>(UInst)) {
5025 if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
5026 return isCapturedIn(/* Memory */ false, /* Integer */ false,
5027 /* Return */ true);
5028 return isCapturedIn(/* Memory */ true, /* Integer */ true,
5029 /* Return */ true);
5030 }
5031
5032 // For now we only use special logic for call sites. However, the tracker
5033 // itself knows about a lot of other non-capturing cases already.
5034 auto *CB = dyn_cast<CallBase>(UInst);
5035 if (!CB || !CB->isArgOperand(U))
5036 return isCapturedIn(/* Memory */ true, /* Integer */ true,
5037 /* Return */ true);
5038
5039 unsigned ArgNo = CB->getArgOperandNo(U);
5040 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5041 // If we have an abstract no-capture attribute for the argument we can use
5042 // it to justify a no-capture attribute here. This allows recursion!
5043 auto &ArgNoCaptureAA =
5044 A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
5045 if (ArgNoCaptureAA.isAssumedNoCapture())
5046 return isCapturedIn(/* Memory */ false, /* Integer */ false,
5047 /* Return */ false);
5048 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5049 addPotentialCopy(*CB);
5050 return isCapturedIn(/* Memory */ false, /* Integer */ false,
5051 /* Return */ false);
5052 }
5053
5054 // Lastly, we could not find a reason to assume no-capture, so we don't.
5055 return isCapturedIn(/* Memory */ true, /* Integer */ true,
5056 /* Return */ true);
5057 }
5058
5059 /// Register \p CB as a potential copy of the value we are checking.
5060 void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); } 5061 5062 /// See CaptureTracker::shouldExplore(...). 5063 bool shouldExplore(const Use *U) override { 5064 // Check liveness and ignore droppable users. 5065 bool UsedAssumedInformation = false; 5066 return !U->getUser()->isDroppable() && 5067 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA, 5068 UsedAssumedInformation); 5069 } 5070 5071 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 5072 /// \p CapturedInRet, then return the appropriate value for use in the 5073 /// CaptureTracker::captured() interface. 5074 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 5075 bool CapturedInRet) { 5076 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 5077 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 5078 if (CapturedInMem) 5079 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 5080 if (CapturedInInt) 5081 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 5082 if (CapturedInRet) 5083 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 5084 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 5085 } 5086 5087 private: 5088 /// The attributor providing in-flight abstract attributes. 5089 Attributor &A; 5090 5091 /// The abstract attribute currently updated. 5092 AANoCapture &NoCaptureAA; 5093 5094 /// The abstract liveness state. 5095 const AAIsDead &IsDeadAA; 5096 5097 /// The state currently updated. 5098 AANoCapture::StateType &State; 5099 5100 /// Set of potential copies of the tracked value. 5101 SmallSetVector<Value *, 4> &PotentialCopies; 5102 5103 /// Global counter to limit the number of explored uses. 5104 unsigned &RemainingUsesToExplore; 5105 }; 5106 5107 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 5108 const IRPosition &IRP = getIRPosition(); 5109 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() 5110 : &IRP.getAssociatedValue(); 5111 if (!V) 5112 return indicatePessimisticFixpoint(); 5113 5114 const Function *F = 5115 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 5116 assert(F && "Expected a function!"); 5117 const IRPosition &FnPos = IRPosition::function(*F); 5118 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE); 5119 5120 AANoCapture::StateType T; 5121 5122 // Readonly means we cannot capture through memory. 5123 bool IsKnown; 5124 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) { 5125 T.addKnownBits(NOT_CAPTURED_IN_MEM); 5126 if (IsKnown) 5127 addKnownBits(NOT_CAPTURED_IN_MEM); 5128 } 5129 5130 // Make sure all returned values are different than the underlying value. 5131 // TODO: we could do this in a more sophisticated way inside 5132 // AAReturnedValues, e.g., track all values that escape through returns 5133 // directly somehow. 5134 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 5135 bool SeenConstant = false; 5136 for (auto &It : RVAA.returned_values()) { 5137 if (isa<Constant>(It.first)) { 5138 if (SeenConstant) 5139 return false; 5140 SeenConstant = true; 5141 } else if (!isa<Argument>(It.first) || 5142 It.first == getAssociatedArgument()) 5143 return false; 5144 } 5145 return true; 5146 }; 5147 5148 const auto &NoUnwindAA = 5149 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL); 5150 if (NoUnwindAA.isAssumedNoUnwind()) { 5151 bool IsVoidTy = F->getReturnType()->isVoidTy(); 5152 const AAReturnedValues *RVAA = 5153 IsVoidTy ? 
nullptr
5154 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5155 DepClassTy::OPTIONAL);
5156
5157 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5158 T.addKnownBits(NOT_CAPTURED_IN_RET);
5159 if (T.isKnown(NOT_CAPTURED_IN_MEM))
5160 return ChangeStatus::UNCHANGED;
5161 if (NoUnwindAA.isKnownNoUnwind() &&
5162 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5163 addKnownBits(NOT_CAPTURED_IN_RET);
5164 if (isKnown(NOT_CAPTURED_IN_MEM))
5165 return indicateOptimisticFixpoint();
5166 }
5167 }
5168 }
5169
5170 // Use the CaptureTracker interface and logic with the specialized tracker,
5171 // defined in AACaptureUseTracker, that can look at in-flight abstract
5172 // attributes and directly update the assumed state.
5173 SmallSetVector<Value *, 4> PotentialCopies;
5174 unsigned RemainingUsesToExplore =
5175 getDefaultMaxUsesToExploreForCaptureTracking();
5176 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
5177 RemainingUsesToExplore);
5178
5179 // Check all potential copies of the associated value until we can assume
5180 // none will be captured or we have to assume at least one might be.
5181 unsigned Idx = 0;
5182 PotentialCopies.insert(V);
5183 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
5184 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
5185
5186 AANoCapture::StateType &S = getState();
5187 auto Assumed = S.getAssumed();
5188 S.intersectAssumedBits(T.getAssumed());
5189 if (!isAssumedNoCaptureMaybeReturned())
5190 return indicatePessimisticFixpoint();
5191 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5192 : ChangeStatus::CHANGED;
5193 }
5194
5195 /// NoCapture attribute for function arguments.
5196 struct AANoCaptureArgument final : AANoCaptureImpl {
5197 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5198 : AANoCaptureImpl(IRP, A) {}
5199
5200 /// See AbstractAttribute::trackStatistics()
5201 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5202 };
5203
5204 /// NoCapture attribute for call site arguments.
5205 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5206 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5207 : AANoCaptureImpl(IRP, A) {}
5208
5209 /// See AbstractAttribute::initialize(...).
5210 void initialize(Attributor &A) override {
5211 if (Argument *Arg = getAssociatedArgument())
5212 if (Arg->hasByValAttr())
5213 indicateOptimisticFixpoint();
5214 AANoCaptureImpl::initialize(A);
5215 }
5216
5217 /// See AbstractAttribute::updateImpl(...).
5218 ChangeStatus updateImpl(Attributor &A) override {
5219 // TODO: Once we have call site specific value information we can provide
5220 // call site specific liveness information and then it makes
5221 // sense to specialize attributes for call site arguments instead of
5222 // redirecting requests to the callee argument.
5223 Argument *Arg = getAssociatedArgument();
5224 if (!Arg)
5225 return indicatePessimisticFixpoint();
5226 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5227 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5228 return clampStateAndIndicateChange(getState(), ArgAA.getState());
5229 }
5230
5231 /// See AbstractAttribute::trackStatistics()
5232 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
5233 };
5234
5235 /// NoCapture attribute for floating values.
5236 struct AANoCaptureFloating final : AANoCaptureImpl {
5237 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5238 : AANoCaptureImpl(IRP, A) {}
5239
5240 /// See AbstractAttribute::trackStatistics()
5241 void trackStatistics() const override {
5242 STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5243 }
5244 };
5245
5246 /// NoCapture attribute for function return value.
5247 struct AANoCaptureReturned final : AANoCaptureImpl {
5248 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5249 : AANoCaptureImpl(IRP, A) {
5250 llvm_unreachable("NoCapture is not applicable to function returns!");
5251 }
5252
5253 /// See AbstractAttribute::initialize(...).
5254 void initialize(Attributor &A) override {
5255 llvm_unreachable("NoCapture is not applicable to function returns!");
5256 }
5257
5258 /// See AbstractAttribute::updateImpl(...).
5259 ChangeStatus updateImpl(Attributor &A) override {
5260 llvm_unreachable("NoCapture is not applicable to function returns!");
5261 }
5262
5263 /// See AbstractAttribute::trackStatistics()
5264 void trackStatistics() const override {}
5265 };
5266
5267 /// NoCapture attribute deduction for a call site return value.
5268 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5269 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5270 : AANoCaptureImpl(IRP, A) {}
5271
5272 /// See AbstractAttribute::initialize(...).
5273 void initialize(Attributor &A) override {
5274 const Function *F = getAnchorScope();
5275 // Check what state the associated function can actually capture.
5276 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5277 }
5278
5279 /// See AbstractAttribute::trackStatistics()
5280 void trackStatistics() const override {
5281 STATS_DECLTRACK_CSRET_ATTR(nocapture)
5282 }
5283 };
5284 } // namespace
5285
5286 /// ------------------ Value Simplify Attribute ----------------------------
5287
5288 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5289 // FIXME: Add typecast support.
5290 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5291 SimplifiedAssociatedValue, Other, Ty);
5292 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5293 return false;
5294
5295 LLVM_DEBUG({
5296 if (SimplifiedAssociatedValue.hasValue())
5297 dbgs() << "[ValueSimplify] is assumed to be "
5298 << **SimplifiedAssociatedValue << "\n";
5299 else
5300 dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5301 });
5302 return true;
5303 }
5304
5305 namespace {
5306 struct AAValueSimplifyImpl : AAValueSimplify {
5307 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5308 : AAValueSimplify(IRP, A) {}
5309
5310 /// See AbstractAttribute::initialize(...).
5311 void initialize(Attributor &A) override {
5312 if (getAssociatedValue().getType()->isVoidTy())
5313 indicatePessimisticFixpoint();
5314 if (A.hasSimplificationCallback(getIRPosition()))
5315 indicatePessimisticFixpoint();
5316 }
5317
5318 /// See AbstractAttribute::getAsStr().
5319 const std::string getAsStr() const override {
5320 LLVM_DEBUG({
5321 errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5322 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5323 errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5324 });
5325 return isValidState() ? (isAtFixpoint() ?
"simplified" : "maybe-simple") 5326 : "not-simple"; 5327 } 5328 5329 /// See AbstractAttribute::trackStatistics() 5330 void trackStatistics() const override {} 5331 5332 /// See AAValueSimplify::getAssumedSimplifiedValue() 5333 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 5334 return SimplifiedAssociatedValue; 5335 } 5336 5337 /// Return a value we can use as replacement for the associated one, or 5338 /// nullptr if we don't have one that makes sense. 5339 Value *getReplacementValue(Attributor &A) const { 5340 Value *NewV; 5341 NewV = SimplifiedAssociatedValue.hasValue() 5342 ? SimplifiedAssociatedValue.getValue() 5343 : UndefValue::get(getAssociatedType()); 5344 if (!NewV) 5345 return nullptr; 5346 NewV = AA::getWithType(*NewV, *getAssociatedType()); 5347 if (!NewV || NewV == &getAssociatedValue()) 5348 return nullptr; 5349 const Instruction *CtxI = getCtxI(); 5350 if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache())) 5351 return nullptr; 5352 if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope())) 5353 return nullptr; 5354 return NewV; 5355 } 5356 5357 /// Helper function for querying AAValueSimplify and updating candicate. 5358 /// \param IRP The value position we are trying to unify with SimplifiedValue 5359 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 5360 const IRPosition &IRP, bool Simplify = true) { 5361 bool UsedAssumedInformation = false; 5362 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue(); 5363 if (Simplify) 5364 QueryingValueSimplified = 5365 A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation); 5366 return unionAssumed(QueryingValueSimplified); 5367 } 5368 5369 /// Returns a candidate is found or not 5370 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { 5371 if (!getAssociatedValue().getType()->isIntegerTy()) 5372 return false; 5373 5374 // This will also pass the call base context. 5375 const auto &AA = 5376 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE); 5377 5378 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A); 5379 5380 if (!COpt.hasValue()) { 5381 SimplifiedAssociatedValue = llvm::None; 5382 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5383 return true; 5384 } 5385 if (auto *C = COpt.getValue()) { 5386 SimplifiedAssociatedValue = C; 5387 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5388 return true; 5389 } 5390 return false; 5391 } 5392 5393 bool askSimplifiedValueForOtherAAs(Attributor &A) { 5394 if (askSimplifiedValueFor<AAValueConstantRange>(A)) 5395 return true; 5396 if (askSimplifiedValueFor<AAPotentialValues>(A)) 5397 return true; 5398 return false; 5399 } 5400 5401 /// See AbstractAttribute::manifest(...). 5402 ChangeStatus manifest(Attributor &A) override { 5403 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5404 if (getAssociatedValue().user_empty()) 5405 return Changed; 5406 5407 if (auto *NewV = getReplacementValue(A)) { 5408 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> " 5409 << *NewV << " :: " << *this << "\n"); 5410 if (A.changeValueAfterManifest(getAssociatedValue(), *NewV)) 5411 Changed = ChangeStatus::CHANGED; 5412 } 5413 5414 return Changed | AAValueSimplify::manifest(A); 5415 } 5416 5417 /// See AbstractState::indicatePessimisticFixpoint(...). 
5418 ChangeStatus indicatePessimisticFixpoint() override {
5419 SimplifiedAssociatedValue = &getAssociatedValue();
5420 return AAValueSimplify::indicatePessimisticFixpoint();
5421 }
5422
5423 static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5424 LoadInst &L, function_ref<bool(Value &)> Union) {
5425 auto UnionWrapper = [&](Value &V, Value &Obj) {
5426 if (isa<AllocaInst>(Obj))
5427 return Union(V);
5428 if (!AA::isDynamicallyUnique(A, AA, V))
5429 return false;
5430 if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5431 return false;
5432 return Union(V);
5433 };
5434
5435 Value &Ptr = *L.getPointerOperand();
5436 SmallVector<Value *, 8> Objects;
5437 bool UsedAssumedInformation = false;
5438 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L,
5439 UsedAssumedInformation))
5440 return false;
5441
5442 const auto *TLI =
5443 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
5444 for (Value *Obj : Objects) {
5445 LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5446 if (isa<UndefValue>(Obj))
5447 continue;
5448 if (isa<ConstantPointerNull>(Obj)) {
5449 // A null pointer access can be undefined but any offset from null may
5450 // be OK. We do not try to optimize the latter.
5451 if (!NullPointerIsDefined(L.getFunction(),
5452 Ptr.getType()->getPointerAddressSpace()) &&
5453 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5454 continue;
5455 return false;
5456 }
5457 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
5458 if (!InitialVal || !Union(*InitialVal))
5459 return false;
5460
5461 LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5462 "propagation, checking accesses next.\n");
5463
5464 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5465 LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5466 if (Acc.isWrittenValueYetUndetermined())
5467 return true;
5468 Value *Content = Acc.getWrittenValue();
5469 if (!Content)
5470 return false;
5471 Value *CastedContent =
5472 AA::getWithType(*Content, *AA.getAssociatedType());
5473 if (!CastedContent)
5474 return false;
5475 if (IsExact)
5476 return UnionWrapper(*CastedContent, *Obj);
5477 if (auto *C = dyn_cast<Constant>(CastedContent))
5478 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5479 return UnionWrapper(*CastedContent, *Obj);
5480 return false;
5481 };
5482
5483 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5484 DepClassTy::REQUIRED);
5485 if (!PI.forallInterferingAccesses(A, AA, L, CheckAccess))
5486 return false;
5487 }
5488 return true;
5489 }
5490 };
5491
5492 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5493 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5494 : AAValueSimplifyImpl(IRP, A) {}
5495
5496 void initialize(Attributor &A) override {
5497 AAValueSimplifyImpl::initialize(A);
5498 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5499 indicatePessimisticFixpoint();
5500 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5501 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5502 /* IgnoreSubsumingPositions */ true))
5503 indicatePessimisticFixpoint();
5504 }
5505
5506 /// See AbstractAttribute::updateImpl(...).
5507 ChangeStatus updateImpl(Attributor &A) override {
5508 // Byval is only replaceable if it is readonly; otherwise we would write
5509 // into the replaced value and not the copy that byval creates implicitly.
5510 Argument *Arg = getAssociatedArgument();
5511 if (Arg->hasByValAttr()) {
5512 // TODO: We probably need to verify synchronization is not an issue, e.g.,
5513 // there is no race by not copying a constant byval.
5514 bool IsKnown;
5515 if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5516 return indicatePessimisticFixpoint();
5517 }
5518
5519 auto Before = SimplifiedAssociatedValue;
5520
5521 auto PredForCallSite = [&](AbstractCallSite ACS) {
5522 const IRPosition &ACSArgPos =
5523 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5524 // Check if a corresponding argument was found or if it is not associated
5525 // (which can happen for callback calls).
5526 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5527 return false;
5528
5529 // Simplify the argument operand explicitly and check if the result is
5530 // valid in the current scope. This avoids referring to simplified values
5531 // in other functions, e.g., we don't want to say an argument in a
5532 // static function is actually an argument in a different function.
5533 bool UsedAssumedInformation = false;
5534 Optional<Constant *> SimpleArgOp =
5535 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5536 if (!SimpleArgOp.hasValue())
5537 return true;
5538 if (!SimpleArgOp.getValue())
5539 return false;
5540 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5541 return false;
5542 return unionAssumed(*SimpleArgOp);
5543 };
5544
5545 // Generate an answer specific to a call site context.
5546 bool Success;
5547 bool UsedAssumedInformation = false;
5548 if (hasCallBaseContext() &&
5549 getCallBaseContext()->getCalledFunction() == Arg->getParent())
5550 Success = PredForCallSite(
5551 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5552 else
5553 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5554 UsedAssumedInformation);
5555
5556 if (!Success)
5557 if (!askSimplifiedValueForOtherAAs(A))
5558 return indicatePessimisticFixpoint();
5559
5560 // If a candidate was found in this update, return CHANGED.
5561 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5562 : ChangeStatus::CHANGED;
5563 }
5564
5565 /// See AbstractAttribute::trackStatistics()
5566 void trackStatistics() const override {
5567 STATS_DECLTRACK_ARG_ATTR(value_simplify)
5568 }
5569 };
5570
5571 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5572 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5573 : AAValueSimplifyImpl(IRP, A) {}
5574
5575 /// See AAValueSimplify::getAssumedSimplifiedValue()
5576 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5577 if (!isValidState())
5578 return nullptr;
5579 return SimplifiedAssociatedValue;
5580 }
5581
5582 /// See AbstractAttribute::updateImpl(...).
5583 ChangeStatus updateImpl(Attributor &A) override {
5584 auto Before = SimplifiedAssociatedValue;
5585
5586 auto PredForReturned = [&](Value &V) {
5587 return checkAndUpdate(A, *this,
5588 IRPosition::value(V, getCallBaseContext()));
5589 };
5590
5591 if (!A.checkForAllReturnedValues(PredForReturned, *this))
5592 if (!askSimplifiedValueForOtherAAs(A))
5593 return indicatePessimisticFixpoint();
5594
5595 // If a candidate was found in this update, return CHANGED.
5596 return Before == SimplifiedAssociatedValue ?
ChangeStatus::UNCHANGED
5597 : ChangeStatus::CHANGED;
5598 }
5599
5600 ChangeStatus manifest(Attributor &A) override {
5601 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5602 if (!A.isRunOn(*getAnchorScope()))
5603 return Changed;
5604
5605 assert(!hasCallBaseContext() && "Should never manifest a simplified "
5606 "function return with call base context!");
5607
5608 if (auto *NewV = getReplacementValue(A)) {
5609 auto PredForReturned =
5610 [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5611 for (ReturnInst *RI : RetInsts) {
5612 Value *ReturnedVal = RI->getReturnValue();
5613 if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5614 return true;
5615 assert(RI->getFunction() == getAnchorScope() &&
5616 "ReturnInst in wrong function!");
5617 LLVM_DEBUG(dbgs()
5618 << "[ValueSimplify] " << *ReturnedVal << " -> "
5619 << *NewV << " in " << *RI << " :: " << *this << "\n");
5620 if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5621 Changed = ChangeStatus::CHANGED;
5622 }
5623 return true;
5624 };
5625 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5626 }
5627
5628 return Changed | AAValueSimplify::manifest(A);
5629 }
5630
5631 /// See AbstractAttribute::trackStatistics()
5632 void trackStatistics() const override {
5633 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5634 }
5635 };
5636
5637 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5638 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5639 : AAValueSimplifyImpl(IRP, A) {}
5640
5641 /// See AbstractAttribute::initialize(...).
5642 void initialize(Attributor &A) override {
5643 AAValueSimplifyImpl::initialize(A);
5644 Value &V = getAnchorValue();
5645
5646 // TODO: Add other cases.
5647 if (isa<Constant>(V))
5648 indicatePessimisticFixpoint();
5649 }
5650
5651 /// Check if \p Cmp is a comparison we can simplify.
5652 ///
5653 /// We handle multiple cases, one in which at least one operand is an
5654 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5655 /// operand. Return true if successful; in that case SimplifiedAssociatedValue
5656 /// will be updated.
5657 bool handleCmp(Attributor &A, CmpInst &Cmp) {
5658 auto Union = [&](Value &V) {
5659 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5660 SimplifiedAssociatedValue, &V, V.getType());
5661 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5662 };
5663
5664 Value *LHS = Cmp.getOperand(0);
5665 Value *RHS = Cmp.getOperand(1);
5666
5667 // Simplify the operands first.
5668 bool UsedAssumedInformation = false;
5669 const auto &SimplifiedLHS =
5670 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5671 *this, UsedAssumedInformation);
5672 if (!SimplifiedLHS.hasValue())
5673 return true;
5674 if (!SimplifiedLHS.getValue())
5675 return false;
5676 LHS = *SimplifiedLHS;
5677
5678 const auto &SimplifiedRHS =
5679 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5680 *this, UsedAssumedInformation);
5681 if (!SimplifiedRHS.hasValue())
5682 return true;
5683 if (!SimplifiedRHS.getValue())
5684 return false;
5685 RHS = *SimplifiedRHS;
5686
5687 LLVMContext &Ctx = Cmp.getContext();
5688 // Handle the trivial case first in which we don't even need to think about
5689 // null or non-null.
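// Illustrative example (not from the source): once both operands simplify
// to the same value, the predicate alone decides the result, e.g.,
//   %c = icmp eq i8* %p, %p  ; folds to true
//   %d = icmp ult i64 %x, %x ; folds to false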
5690 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5691 Constant *NewVal =
5692 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5693 if (!Union(*NewVal))
5694 return false;
5695 if (!UsedAssumedInformation)
5696 indicateOptimisticFixpoint();
5697 return true;
5698 }
5699
5700 // From now on we only handle equalities (==, !=).
5701 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5702 if (!ICmp || !ICmp->isEquality())
5703 return false;
5704
5705 bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5706 bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5707 if (!LHSIsNull && !RHSIsNull)
5708 return false;
5709
5710 // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
5711 // on the non-nullptr operand; if we can assume it is non-null we can
5712 // conclude the result of the comparison.
5713 assert((LHSIsNull || RHSIsNull) &&
5714 "Expected nullptr versus non-nullptr comparison at this point");
5715
5716 // The index of the operand that we assume is not null.
5717 unsigned PtrIdx = LHSIsNull;
5718 auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5719 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5720 DepClassTy::REQUIRED);
5721 if (!PtrNonNullAA.isAssumedNonNull())
5722 return false;
5723 UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5724
5725 // The new value depends on the predicate, true for != and false for ==.
5726 Constant *NewVal = ConstantInt::get(
5727 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5728 if (!Union(*NewVal))
5729 return false;
5730
5731 if (!UsedAssumedInformation)
5732 indicateOptimisticFixpoint();
5733
5734 return true;
5735 }
5736
5737 bool updateWithLoad(Attributor &A, LoadInst &L) {
5738 auto Union = [&](Value &V) {
5739 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5740 SimplifiedAssociatedValue, &V, L.getType());
5741 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5742 };
5743 return handleLoad(A, *this, L, Union);
5744 }
5745
5746 /// Use the generic, non-optimistic InstSimplify functionality if we managed
5747 /// to simplify any operand of the instruction \p I. Return true if
5748 /// successful; in that case SimplifiedAssociatedValue will be updated.
5749 bool handleGenericInst(Attributor &A, Instruction &I) {
5750 bool SomeSimplified = false;
5751 bool UsedAssumedInformation = false;
5752
5753 SmallVector<Value *, 8> NewOps(I.getNumOperands());
5754 int Idx = 0;
5755 for (Value *Op : I.operands()) {
5756 const auto &SimplifiedOp =
5757 A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5758 *this, UsedAssumedInformation);
5759 // If we are not sure about any operand, we are not sure about the entire
5760 // instruction either, so we wait.
5761 if (!SimplifiedOp.hasValue())
5762 return true;
5763
5764 if (SimplifiedOp.getValue())
5765 NewOps[Idx] = SimplifiedOp.getValue();
5766 else
5767 NewOps[Idx] = Op;
5768
5769 SomeSimplified |= (NewOps[Idx] != Op);
5770 ++Idx;
5771 }
5772
5773 // We won't bother with the InstSimplify interface if we didn't simplify
5774 // any operand ourselves.
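// Illustrative example (not from the source): if %a was simplified to the
// constant 7 and %b to 1, then for
//   %add = add i32 %a, %b
// the query below asks InstSimplify with the operands {7, 1}, which yields
// the constant 8 as the new simplification candidate.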
5775 if (!SomeSimplified)
5776 return false;
5777
5778 InformationCache &InfoCache = A.getInfoCache();
5779 Function *F = I.getFunction();
5780 const auto *DT =
5781 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5782 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5783 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5784 OptimizationRemarkEmitter *ORE = nullptr;
5785
5786 const DataLayout &DL = I.getModule()->getDataLayout();
5787 SimplifyQuery Q(DL, TLI, DT, AC, &I);
5788 if (Value *SimplifiedI =
5789 SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5790 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5791 SimplifiedAssociatedValue, SimplifiedI, I.getType());
5792 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5793 }
5794 return false;
5795 }
5796
5797 /// See AbstractAttribute::updateImpl(...).
5798 ChangeStatus updateImpl(Attributor &A) override {
5799 auto Before = SimplifiedAssociatedValue;
5800
5801 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5802 bool Stripped) -> bool {
5803 auto &AA = A.getAAFor<AAValueSimplify>(
5804 *this, IRPosition::value(V, getCallBaseContext()),
5805 DepClassTy::REQUIRED);
5806 if (!Stripped && this == &AA) {
5807
5808 if (auto *I = dyn_cast<Instruction>(&V)) {
5809 if (auto *LI = dyn_cast<LoadInst>(&V))
5810 if (updateWithLoad(A, *LI))
5811 return true;
5812 if (auto *Cmp = dyn_cast<CmpInst>(&V))
5813 if (handleCmp(A, *Cmp))
5814 return true;
5815 if (handleGenericInst(A, *I))
5816 return true;
5817 }
5818 // TODO: Look at the instruction and check recursively.
5819
5820 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5821 << "\n");
5822 return false;
5823 }
5824 return checkAndUpdate(A, *this,
5825 IRPosition::value(V, getCallBaseContext()));
5826 };
5827
5828 bool Dummy = false;
5829 bool UsedAssumedInformation = false;
5830 if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5831 VisitValueCB, getCtxI(),
5832 UsedAssumedInformation,
5833 /* UseValueSimplify */ false))
5834 if (!askSimplifiedValueForOtherAAs(A))
5835 return indicatePessimisticFixpoint();
5836
5837 // If a candidate was found in this update, return CHANGED.
5838 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5839 : ChangeStatus::CHANGED;
5840 }
5841
5842 /// See AbstractAttribute::trackStatistics()
5843 void trackStatistics() const override {
5844 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5845 }
5846 };
5847
5848 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5849 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5850 : AAValueSimplifyImpl(IRP, A) {}
5851
5852 /// See AbstractAttribute::initialize(...).
5853 void initialize(Attributor &A) override {
5854 SimplifiedAssociatedValue = nullptr;
5855 indicateOptimisticFixpoint();
5856 }
5857 /// See AbstractAttribute::updateImpl(...).
5858 ChangeStatus updateImpl(Attributor &A) override {
5859 llvm_unreachable(
5860 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5861 }
5862 /// See AbstractAttribute::trackStatistics()
5863 void trackStatistics() const override {
5864 STATS_DECLTRACK_FN_ATTR(value_simplify)
5865 }
5866 };
5867
5868 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5869 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5870 : AAValueSimplifyFunction(IRP, A) {}
5871 /// See AbstractAttribute::trackStatistics()
5872 void trackStatistics() const override {
5873 STATS_DECLTRACK_CS_ATTR(value_simplify)
5874 }
5875 };
5876
5877 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5878 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5879 : AAValueSimplifyImpl(IRP, A) {}
5880
5881 void initialize(Attributor &A) override {
5882 AAValueSimplifyImpl::initialize(A);
5883 if (!getAssociatedFunction())
5884 indicatePessimisticFixpoint();
5885 }
5886
5887 /// See AbstractAttribute::updateImpl(...).
5888 ChangeStatus updateImpl(Attributor &A) override {
5889 auto Before = SimplifiedAssociatedValue;
5890 auto &RetAA = A.getAAFor<AAReturnedValues>(
5891 *this, IRPosition::function(*getAssociatedFunction()),
5892 DepClassTy::REQUIRED);
5893 auto PredForReturned =
5894 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5895 bool UsedAssumedInformation = false;
5896 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5897 &RetVal, *cast<CallBase>(getCtxI()), *this,
5898 UsedAssumedInformation);
5899 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5900 SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5901 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5902 };
5903 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5904 if (!askSimplifiedValueForOtherAAs(A))
5905 return indicatePessimisticFixpoint();
5906 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5907 : ChangeStatus::CHANGED;
5908 }
5909
5910 void trackStatistics() const override {
5911 STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5912 }
5913 };
5914
5915 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5916 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5917 : AAValueSimplifyFloating(IRP, A) {}
5918
5919 /// See AbstractAttribute::manifest(...).
5920 ChangeStatus manifest(Attributor &A) override {
5921 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5922
5923 if (auto *NewV = getReplacementValue(A)) {
5924 Use &U = cast<CallBase>(&getAnchorValue())
5925 ->getArgOperandUse(getCallSiteArgNo());
5926 if (A.changeUseAfterManifest(U, *NewV))
5927 Changed = ChangeStatus::CHANGED;
5928 }
5929
5930 return Changed | AAValueSimplify::manifest(A);
5931 }
5932
5933 void trackStatistics() const override {
5934 STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5935 }
5936 };
5937 } // namespace
5938
5939 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5940 namespace {
5941 struct AAHeapToStackFunction final : public AAHeapToStack {
5942
5943 struct AllocationInfo {
5944 /// The call that allocates the memory.
5945 CallBase *const CB;
5946
5947 /// The library function id for the allocation.
5948 LibFunc LibraryFunctionId = NotLibFunc;
5949
5950 /// The status wrt. a rewrite.
5951 enum { 5952 STACK_DUE_TO_USE, 5953 STACK_DUE_TO_FREE, 5954 INVALID, 5955 } Status = STACK_DUE_TO_USE; 5956 5957 /// Flag to indicate if we encountered a use that might free this allocation 5958 /// but which is not in the deallocation infos. 5959 bool HasPotentiallyFreeingUnknownUses = false; 5960 5961 /// The set of free calls that use this allocation. 5962 SmallPtrSet<CallBase *, 1> PotentialFreeCalls{}; 5963 }; 5964 5965 struct DeallocationInfo { 5966 /// The call that deallocates the memory. 5967 CallBase *const CB; 5968 5969 /// Flag to indicate if we don't know all objects this deallocation might 5970 /// free. 5971 bool MightFreeUnknownObjects = false; 5972 5973 /// The set of allocation calls that are potentially freed. 5974 SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{}; 5975 }; 5976 5977 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5978 : AAHeapToStack(IRP, A) {} 5979 5980 ~AAHeapToStackFunction() { 5981 // Ensure we call the destructor so we release any memory allocated in the 5982 // sets. 5983 for (auto &It : AllocationInfos) 5984 It.getSecond()->~AllocationInfo(); 5985 for (auto &It : DeallocationInfos) 5986 It.getSecond()->~DeallocationInfo(); 5987 } 5988 5989 void initialize(Attributor &A) override { 5990 AAHeapToStack::initialize(A); 5991 5992 const Function *F = getAnchorScope(); 5993 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5994 5995 auto AllocationIdentifierCB = [&](Instruction &I) { 5996 CallBase *CB = dyn_cast<CallBase>(&I); 5997 if (!CB) 5998 return true; 5999 if (isFreeCall(CB, TLI)) { 6000 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 6001 return true; 6002 } 6003 // To do heap to stack, we need to know that the allocation itself is 6004 // removable once uses are rewritten, and that we can initialize the 6005 // alloca to the same pattern as the original allocation result. 6006 if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) { 6007 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext()); 6008 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) { 6009 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB}; 6010 AllocationInfos[CB] = AI; 6011 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 6012 } 6013 } 6014 return true; 6015 }; 6016 6017 bool UsedAssumedInformation = false; 6018 bool Success = A.checkForAllCallLikeInstructions( 6019 AllocationIdentifierCB, *this, UsedAssumedInformation, 6020 /* CheckBBLivenessOnly */ false, 6021 /* CheckPotentiallyDead */ true); 6022 (void)Success; 6023 assert(Success && "Did not expect the call base visit callback to fail!"); 6024 6025 Attributor::SimplifictionCallbackTy SCB = 6026 [](const IRPosition &, const AbstractAttribute *, 6027 bool &) -> Optional<Value *> { return nullptr; }; 6028 for (const auto &It : AllocationInfos) 6029 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 6030 SCB); 6031 for (const auto &It : DeallocationInfos) 6032 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 6033 SCB); 6034 } 6035 6036 const std::string getAsStr() const override { 6037 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 6038 for (const auto &It : AllocationInfos) { 6039 if (It.second->Status == AllocationInfo::INVALID) 6040 ++NumInvalidMallocs; 6041 else 6042 ++NumH2SMallocs; 6043 } 6044 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 6045 std::to_string(NumInvalidMallocs); 6046 } 6047 6048 /// See AbstractAttribute::trackStatistics(). 
6049 void trackStatistics() const override { 6050 STATS_DECL( 6051 MallocCalls, Function, 6052 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 6053 for (auto &It : AllocationInfos) 6054 if (It.second->Status != AllocationInfo::INVALID) 6055 ++BUILD_STAT_NAME(MallocCalls, Function); 6056 } 6057 6058 bool isAssumedHeapToStack(const CallBase &CB) const override { 6059 if (isValidState()) 6060 if (AllocationInfo *AI = AllocationInfos.lookup(&CB)) 6061 return AI->Status != AllocationInfo::INVALID; 6062 return false; 6063 } 6064 6065 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 6066 if (!isValidState()) 6067 return false; 6068 6069 for (auto &It : AllocationInfos) { 6070 AllocationInfo &AI = *It.second; 6071 if (AI.Status == AllocationInfo::INVALID) 6072 continue; 6073 6074 if (AI.PotentialFreeCalls.count(&CB)) 6075 return true; 6076 } 6077 6078 return false; 6079 } 6080 6081 ChangeStatus manifest(Attributor &A) override { 6082 assert(getState().isValidState() && 6083 "Attempted to manifest an invalid state!"); 6084 6085 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 6086 Function *F = getAnchorScope(); 6087 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6088 6089 for (auto &It : AllocationInfos) { 6090 AllocationInfo &AI = *It.second; 6091 if (AI.Status == AllocationInfo::INVALID) 6092 continue; 6093 6094 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 6095 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 6096 A.deleteAfterManifest(*FreeCall); 6097 HasChanged = ChangeStatus::CHANGED; 6098 } 6099 6100 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 6101 << "\n"); 6102 6103 auto Remark = [&](OptimizationRemark OR) { 6104 LibFunc IsAllocShared; 6105 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 6106 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 6107 return OR << "Moving globalized variable to the stack."; 6108 return OR << "Moving memory allocation from the heap to the stack."; 6109 }; 6110 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6111 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 6112 else 6113 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 6114 6115 const DataLayout &DL = A.getInfoCache().getDL(); 6116 Value *Size; 6117 Optional<APInt> SizeAPI = getSize(A, *this, AI); 6118 if (SizeAPI.hasValue()) { 6119 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 6120 } else { 6121 LLVMContext &Ctx = AI.CB->getContext(); 6122 ObjectSizeOpts Opts; 6123 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); 6124 SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB); 6125 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && 6126 cast<ConstantInt>(SizeOffsetPair.second)->isZero()); 6127 Size = SizeOffsetPair.first; 6128 } 6129 6130 Align Alignment(1); 6131 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 6132 Alignment = max(Alignment, RetAlign); 6133 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6134 Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); 6135 assert(AlignmentAPI.hasValue() && 6136 "Expected an alignment during manifest!"); 6137 Alignment = 6138 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue())); 6139 } 6140 6141 // TODO: Hoist the alloca towards the function entry. 
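// Illustrative end-to-end effect (assuming a constant size of 16 bytes and
// a matching free that is removed above):
//   %p = call i8* @malloc(i64 16)  ; replaced by the alloca below
//   ...
//   call void @free(i8* %p)        ; deleted
// becomes
//   %p = alloca i8, i64 16         ; alignment raised if known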
6142 unsigned AS = DL.getAllocaAddrSpace(); 6143 Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS, 6144 Size, Alignment, "", AI.CB); 6145 6146 if (Alloca->getType() != AI.CB->getType()) 6147 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6148 Alloca, AI.CB->getType(), "malloc_cast", AI.CB); 6149 6150 auto *I8Ty = Type::getInt8Ty(F->getContext()); 6151 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty); 6152 assert(InitVal && 6153 "Must be able to materialize initial memory state of allocation"); 6154 6155 A.changeValueAfterManifest(*AI.CB, *Alloca); 6156 6157 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 6158 auto *NBB = II->getNormalDest(); 6159 BranchInst::Create(NBB, AI.CB->getParent()); 6160 A.deleteAfterManifest(*AI.CB); 6161 } else { 6162 A.deleteAfterManifest(*AI.CB); 6163 } 6164 6165 // Initialize the alloca with the same value as used by the allocation 6166 // function. We can skip undef as the initial value of an alloc is 6167 // undef, and the memset would simply end up being DSEd. 6168 if (!isa<UndefValue>(InitVal)) { 6169 IRBuilder<> Builder(Alloca->getNextNode()); 6170 // TODO: Use alignment above if align!=1 6171 Builder.CreateMemSet(Alloca, InitVal, Size, None); 6172 } 6173 HasChanged = ChangeStatus::CHANGED; 6174 } 6175 6176 return HasChanged; 6177 } 6178 6179 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6180 Value &V) { 6181 bool UsedAssumedInformation = false; 6182 Optional<Constant *> SimpleV = 6183 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6184 if (!SimpleV.hasValue()) 6185 return APInt(64, 0); 6186 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) 6187 return CI->getValue(); 6188 return llvm::None; 6189 } 6190 6191 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6192 AllocationInfo &AI) { 6193 auto Mapper = [&](const Value *V) -> const Value * { 6194 bool UsedAssumedInformation = false; 6195 if (Optional<Constant *> SimpleV = 6196 A.getAssumedConstant(*V, AA, UsedAssumedInformation)) 6197 if (*SimpleV) 6198 return *SimpleV; 6199 return V; 6200 }; 6201 6202 const Function *F = getAnchorScope(); 6203 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6204 return getAllocSize(AI.CB, TLI, Mapper); 6205 } 6206 6207 /// Collection of all malloc-like calls in a function with associated 6208 /// information. 6209 DenseMap<CallBase *, AllocationInfo *> AllocationInfos; 6210 6211 /// Collection of all free-like calls in a function with associated 6212 /// information. 6213 DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos; 6214 6215 ChangeStatus updateImpl(Attributor &A) override; 6216 }; 6217 6218 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { 6219 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6220 const Function *F = getAnchorScope(); 6221 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6222 6223 const auto &LivenessAA = 6224 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); 6225 6226 MustBeExecutedContextExplorer &Explorer = 6227 A.getInfoCache().getMustBeExecutedContextExplorer(); 6228 6229 bool StackIsAccessibleByOtherThreads = 6230 A.getInfoCache().stackIsAccessibleByOtherThreads(); 6231 6232 // Flag to ensure we update our deallocation information at most once per 6233 // updateImpl call and only if we use the free check reasoning. 
6234 bool HasUpdatedFrees = false;
6235
6236 auto UpdateFrees = [&]() {
6237 HasUpdatedFrees = true;
6238
6239 for (auto &It : DeallocationInfos) {
6240 DeallocationInfo &DI = *It.second;
6241 // For now we cannot use deallocations that have unknown inputs, so skip
6242 // them.
6243 if (DI.MightFreeUnknownObjects)
6244 continue;
6245
6246 // No need to analyze dead calls, ignore them instead.
6247 bool UsedAssumedInformation = false;
6248 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6249 /* CheckBBLivenessOnly */ true))
6250 continue;
6251
6252 // Use the optimistic version to get the freed objects, ignoring dead
6253 // branches etc.
6254 SmallVector<Value *, 8> Objects;
6255 if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6256 *this, DI.CB,
6257 UsedAssumedInformation)) {
6258 LLVM_DEBUG(
6259 dbgs()
6260 << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6261 DI.MightFreeUnknownObjects = true;
6262 continue;
6263 }
6264
6265 // Check each object explicitly.
6266 for (auto *Obj : Objects) {
6267 // Free of null and undef can be ignored as no-ops (or UB in the latter
6268 // case).
6269 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6270 continue;
6271
6272 CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6273 if (!ObjCB) {
6274 LLVM_DEBUG(dbgs()
6275 << "[H2S] Free of a non-call object: " << *Obj << "\n");
6276 DI.MightFreeUnknownObjects = true;
6277 continue;
6278 }
6279
6280 AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6281 if (!AI) {
6282 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6283 << "\n");
6284 DI.MightFreeUnknownObjects = true;
6285 continue;
6286 }
6287
6288 DI.PotentialAllocationCalls.insert(ObjCB);
6289 }
6290 }
6291 };
6292
6293 auto FreeCheck = [&](AllocationInfo &AI) {
6294 // If the stack is not accessible by other threads, the "must-free" logic
6295 // doesn't apply as the pointer could be shared and would need to be placed
6296 // in "shareable" memory.
6297 if (!StackIsAccessibleByOtherThreads) {
6298 auto &NoSyncAA =
6299 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6300 if (!NoSyncAA.isAssumedNoSync()) {
6301 LLVM_DEBUG(
6302 dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6303 "other threads and function is not nosync:\n");
6304 return false;
6305 }
6306 }
6307 if (!HasUpdatedFrees)
6308 UpdateFrees();
6309
6310 // TODO: Allow multi exit functions that have different free calls.
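// Sketch of the only shape FreeCheck currently accepts (illustration):
//   %p = call i8* @malloc(i64 8)   ; the allocation AI.CB
//   ...
//   call void @free(i8* %p)        ; a single known free, must-executed in
//                                  ; the context of the allocation
// Multiple frees, unknown frees, or frees of unknown objects all fail the
// checks below.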
6311 if (AI.PotentialFreeCalls.size() != 1) { 6312 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " 6313 << AI.PotentialFreeCalls.size() << "\n"); 6314 return false; 6315 } 6316 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); 6317 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); 6318 if (!DI) { 6319 LLVM_DEBUG( 6320 dbgs() << "[H2S] unique free call was not known as deallocation call " 6321 << *UniqueFree << "\n"); 6322 return false; 6323 } 6324 if (DI->MightFreeUnknownObjects) { 6325 LLVM_DEBUG( 6326 dbgs() << "[H2S] unique free call might free unknown allocations\n"); 6327 return false; 6328 } 6329 if (DI->PotentialAllocationCalls.size() > 1) { 6330 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " 6331 << DI->PotentialAllocationCalls.size() 6332 << " different allocations\n"); 6333 return false; 6334 } 6335 if (*DI->PotentialAllocationCalls.begin() != AI.CB) { 6336 LLVM_DEBUG( 6337 dbgs() 6338 << "[H2S] unique free call not known to free this allocation but " 6339 << **DI->PotentialAllocationCalls.begin() << "\n"); 6340 return false; 6341 } 6342 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); 6343 if (!Explorer.findInContextOf(UniqueFree, CtxI)) { 6344 LLVM_DEBUG( 6345 dbgs() 6346 << "[H2S] unique free call might not be executed with the allocation " 6347 << *UniqueFree << "\n"); 6348 return false; 6349 } 6350 return true; 6351 }; 6352 6353 auto UsesCheck = [&](AllocationInfo &AI) { 6354 bool ValidUsesOnly = true; 6355 6356 auto Pred = [&](const Use &U, bool &Follow) -> bool { 6357 Instruction *UserI = cast<Instruction>(U.getUser()); 6358 if (isa<LoadInst>(UserI)) 6359 return true; 6360 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 6361 if (SI->getValueOperand() == U.get()) { 6362 LLVM_DEBUG(dbgs() 6363 << "[H2S] escaping store to memory: " << *UserI << "\n"); 6364 ValidUsesOnly = false; 6365 } else { 6366 // A store into the malloc'ed memory is fine. 6367 } 6368 return true; 6369 } 6370 if (auto *CB = dyn_cast<CallBase>(UserI)) { 6371 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 6372 return true; 6373 if (DeallocationInfos.count(CB)) { 6374 AI.PotentialFreeCalls.insert(CB); 6375 return true; 6376 } 6377 6378 unsigned ArgNo = CB->getArgOperandNo(&U); 6379 6380 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 6381 *this, IRPosition::callsite_argument(*CB, ArgNo), 6382 DepClassTy::OPTIONAL); 6383 6384 // If a call site argument use is nofree, we are fine. 6385 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 6386 *this, IRPosition::callsite_argument(*CB, ArgNo), 6387 DepClassTy::OPTIONAL); 6388 6389 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); 6390 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); 6391 if (MaybeCaptured || 6392 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && 6393 MaybeFreed)) { 6394 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; 6395 6396 // Emit a missed remark if this is missed OpenMP globalization. 6397 auto Remark = [&](OptimizationRemarkMissed ORM) { 6398 return ORM 6399 << "Could not move globalized variable to the stack. " 6400 "Variable is potentially captured in call. 
Mark " 6401 "parameter as `__attribute__((noescape))` to override."; 6402 }; 6403 6404 if (ValidUsesOnly && 6405 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6406 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark); 6407 6408 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 6409 ValidUsesOnly = false; 6410 } 6411 return true; 6412 } 6413 6414 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 6415 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 6416 Follow = true; 6417 return true; 6418 } 6419 // Unknown user for which we can not track uses further (in a way that 6420 // makes sense). 6421 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 6422 ValidUsesOnly = false; 6423 return true; 6424 }; 6425 if (!A.checkForAllUses(Pred, *this, *AI.CB)) 6426 return false; 6427 return ValidUsesOnly; 6428 }; 6429 6430 // The actual update starts here. We look at all allocations and depending on 6431 // their status perform the appropriate check(s). 6432 for (auto &It : AllocationInfos) { 6433 AllocationInfo &AI = *It.second; 6434 if (AI.Status == AllocationInfo::INVALID) 6435 continue; 6436 6437 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6438 Optional<APInt> APAlign = getAPInt(A, *this, *Align); 6439 if (!APAlign) { 6440 // Can't generate an alloca which respects the required alignment 6441 // on the allocation. 6442 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB 6443 << "\n"); 6444 AI.Status = AllocationInfo::INVALID; 6445 Changed = ChangeStatus::CHANGED; 6446 continue; 6447 } else { 6448 if (APAlign->ugt(llvm::Value::MaximumAlignment) || !APAlign->isPowerOf2()) { 6449 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign << "\n"); 6450 AI.Status = AllocationInfo::INVALID; 6451 Changed = ChangeStatus::CHANGED; 6452 continue; 6453 } 6454 } 6455 } 6456 6457 if (MaxHeapToStackSize != -1) { 6458 Optional<APInt> Size = getSize(A, *this, AI); 6459 if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) { 6460 LLVM_DEBUG({ 6461 if (!Size.hasValue()) 6462 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; 6463 else 6464 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " 6465 << MaxHeapToStackSize << "\n"; 6466 }); 6467 6468 AI.Status = AllocationInfo::INVALID; 6469 Changed = ChangeStatus::CHANGED; 6470 continue; 6471 } 6472 } 6473 6474 switch (AI.Status) { 6475 case AllocationInfo::STACK_DUE_TO_USE: 6476 if (UsesCheck(AI)) 6477 continue; 6478 AI.Status = AllocationInfo::STACK_DUE_TO_FREE; 6479 LLVM_FALLTHROUGH; 6480 case AllocationInfo::STACK_DUE_TO_FREE: 6481 if (FreeCheck(AI)) 6482 continue; 6483 AI.Status = AllocationInfo::INVALID; 6484 Changed = ChangeStatus::CHANGED; 6485 continue; 6486 case AllocationInfo::INVALID: 6487 llvm_unreachable("Invalid allocations should never reach this point!"); 6488 }; 6489 } 6490 6491 return Changed; 6492 } 6493 } // namespace 6494 6495 /// ----------------------- Privatizable Pointers ------------------------------ 6496 namespace { 6497 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 6498 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 6499 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 6500 6501 ChangeStatus indicatePessimisticFixpoint() override { 6502 AAPrivatizablePtr::indicatePessimisticFixpoint(); 6503 PrivatizableType = nullptr; 6504 return ChangeStatus::CHANGED; 6505 } 6506 6507 /// Identify the type we can chose for a private copy of the underlying 6508 /// argument. 
None means it is not clear yet, nullptr means there is none.
6509 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6510
6511 /// Return a privatizable type that encloses both T0 and T1.
6512 /// TODO: This is merely a stub for now as we should manage a mapping as well.
6513 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6514 if (!T0.hasValue())
6515 return T1;
6516 if (!T1.hasValue())
6517 return T0;
6518 if (T0 == T1)
6519 return T0;
6520 return nullptr;
6521 }
6522
6523 Optional<Type *> getPrivatizableType() const override {
6524 return PrivatizableType;
6525 }
6526
6527 const std::string getAsStr() const override {
6528 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6529 }
6530
6531 protected:
6532 Optional<Type *> PrivatizableType;
6533 };
6534
6535 // TODO: Do this for call site arguments (probably also other values) as well.
6536
6537 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6538 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6539 : AAPrivatizablePtrImpl(IRP, A) {}
6540
6541 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6542 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6543 // If this is a byval argument and we know all the call sites (so we can
6544 // rewrite them), there is no need to check them explicitly.
6545 bool UsedAssumedInformation = false;
6546 if (getIRPosition().hasAttr(Attribute::ByVal) &&
6547 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6548 true, UsedAssumedInformation))
6549 return getAssociatedValue().getType()->getPointerElementType();
6550
6551 Optional<Type *> Ty;
6552 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6553
6554 // Make sure the associated call site argument has the same type at all call
6555 // sites and that it is an allocation we know is safe to privatize; for now
6556 // that means we only allow alloca instructions.
6557 // TODO: We can additionally analyze the accesses in the callee to create
6558 // the type from that information instead. That is a little more
6559 // involved and will be done in a follow-up patch.
6560 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6561 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6562 // Check if a corresponding argument was found or if it is one not
6563 // associated (which can happen for callback calls).
6564 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6565 return false;
6566
6567 // Check that all call sites agree on a type.
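// E.g. (hypothetical): if one call site passes an alloca of %struct.S and
// another an alloca of i32, combineTypes below yields nullptr and
// privatization is given up on.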
6568 auto &PrivCSArgAA = 6569 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED); 6570 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 6571 6572 LLVM_DEBUG({ 6573 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 6574 if (CSTy.hasValue() && CSTy.getValue()) 6575 CSTy.getValue()->print(dbgs()); 6576 else if (CSTy.hasValue()) 6577 dbgs() << "<nullptr>"; 6578 else 6579 dbgs() << "<none>"; 6580 }); 6581 6582 Ty = combineTypes(Ty, CSTy); 6583 6584 LLVM_DEBUG({ 6585 dbgs() << " : New Type: "; 6586 if (Ty.hasValue() && Ty.getValue()) 6587 Ty.getValue()->print(dbgs()); 6588 else if (Ty.hasValue()) 6589 dbgs() << "<nullptr>"; 6590 else 6591 dbgs() << "<none>"; 6592 dbgs() << "\n"; 6593 }); 6594 6595 return !Ty.hasValue() || Ty.getValue(); 6596 }; 6597 6598 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6599 UsedAssumedInformation)) 6600 return nullptr; 6601 return Ty; 6602 } 6603 6604 /// See AbstractAttribute::updateImpl(...). 6605 ChangeStatus updateImpl(Attributor &A) override { 6606 PrivatizableType = identifyPrivatizableType(A); 6607 if (!PrivatizableType.hasValue()) 6608 return ChangeStatus::UNCHANGED; 6609 if (!PrivatizableType.getValue()) 6610 return indicatePessimisticFixpoint(); 6611 6612 // The dependence is optional so we don't give up once we give up on the 6613 // alignment. 6614 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 6615 DepClassTy::OPTIONAL); 6616 6617 // Avoid arguments with padding for now. 6618 if (!getIRPosition().hasAttr(Attribute::ByVal) && 6619 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 6620 A.getInfoCache().getDL())) { 6621 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 6622 return indicatePessimisticFixpoint(); 6623 } 6624 6625 // Collect the types that will replace the privatizable type in the function 6626 // signature. 6627 SmallVector<Type *, 16> ReplacementTypes; 6628 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6629 6630 // Verify callee and caller agree on how the promoted argument would be 6631 // passed. 6632 Function &Fn = *getIRPosition().getAnchorScope(); 6633 const auto *TTI = 6634 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 6635 if (!TTI) { 6636 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function " 6637 << Fn.getName() << "\n"); 6638 return indicatePessimisticFixpoint(); 6639 } 6640 6641 auto CallSiteCheck = [&](AbstractCallSite ACS) { 6642 CallBase *CB = ACS.getInstruction(); 6643 return TTI->areTypesABICompatible( 6644 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes); 6645 }; 6646 bool UsedAssumedInformation = false; 6647 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6648 UsedAssumedInformation)) { 6649 LLVM_DEBUG( 6650 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 6651 << Fn.getName() << "\n"); 6652 return indicatePessimisticFixpoint(); 6653 } 6654 6655 // Register a rewrite of the argument. 6656 Argument *Arg = getAssociatedArgument(); 6657 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 6658 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 6659 return indicatePessimisticFixpoint(); 6660 } 6661 6662 unsigned ArgNo = Arg->getArgNo(); 6663 6664 // Helper to check if for the given call site the associated argument is 6665 // passed to a callback where the privatization would be different. 
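// E.g. (sketch): for a broker call `call void @broker(void (i32*)* @cb,
// i32* %p)` the callback abstract call site maps %p to a parameter of @cb;
// privatizing %p here is only compatible if that parameter of @cb
// privatizes to the same type (or has no opinion yet).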
6666 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6667 SmallVector<const Use *, 4> CallbackUses;
6668 AbstractCallSite::getCallbackUses(CB, CallbackUses);
6669 for (const Use *U : CallbackUses) {
6670 AbstractCallSite CBACS(U);
6671 assert(CBACS && CBACS.isCallbackCall());
6672 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6673 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6674
6675 LLVM_DEBUG({
6676 dbgs()
6677 << "[AAPrivatizablePtr] Argument " << *Arg
6678 << " check if it can be privatized in the context of its parent ("
6679 << Arg->getParent()->getName()
6680 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6681 "callback ("
6682 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6683 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6684 << CBACS.getCallArgOperand(CBArg) << " vs "
6685 << CB.getArgOperand(ArgNo) << "\n"
6686 << "[AAPrivatizablePtr] " << CBArg << " : "
6687 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6688 });
6689
6690 if (CBArgNo != int(ArgNo))
6691 continue;
6692 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6693 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6694 if (CBArgPrivAA.isValidState()) {
6695 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6696 if (!CBArgPrivTy.hasValue())
6697 continue;
6698 if (CBArgPrivTy.getValue() == PrivatizableType)
6699 continue;
6700 }
6701
6702 LLVM_DEBUG({
6703 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6704 << " cannot be privatized in the context of its parent ("
6705 << Arg->getParent()->getName()
6706 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6707 "callback ("
6708 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6709 << ").\n[AAPrivatizablePtr] for which the argument "
6710 "privatization is not compatible.\n";
6711 });
6712 return false;
6713 }
6714 }
6715 return true;
6716 };
6717
6718 // Helper to check if for the given call site the associated argument is
6719 // passed to a direct call where the privatization would be different.
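// E.g. (sketch): if the associated argument reaches a callback call site,
// the same use is also an operand of the underlying broker call; the
// broker's corresponding parameter must privatize to the same type (or
// have no opinion yet) for the privatization here to remain sound.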
6720 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6721 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6722 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6723 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6724 "Expected a direct call operand for callback call operand");
6725
6726 LLVM_DEBUG({
6727 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6728 << " check if it can be privatized in the context of its parent ("
6729 << Arg->getParent()->getName()
6730 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6731 "direct call of ("
6732 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6733 << ").\n";
6734 });
6735
6736 Function *DCCallee = DC->getCalledFunction();
6737 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6738 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6739 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6740 DepClassTy::REQUIRED);
6741 if (DCArgPrivAA.isValidState()) {
6742 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6743 if (!DCArgPrivTy.hasValue())
6744 return true;
6745 if (DCArgPrivTy.getValue() == PrivatizableType)
6746 return true;
6747 }
6748 }
6749
6750 LLVM_DEBUG({
6751 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6752 << " cannot be privatized in the context of its parent ("
6753 << Arg->getParent()->getName()
6754 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6755 "direct call of ("
6756 << ACS.getInstruction()->getCalledFunction()->getName()
6757 << ").\n[AAPrivatizablePtr] for which the argument "
6758 "privatization is not compatible.\n";
6759 });
6760 return false;
6761 };
6762
6763 // Helper to check if the associated argument is used at the given abstract
6764 // call site in a way that is incompatible with the privatization assumed
6765 // here.
6766 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6767 if (ACS.isDirectCall())
6768 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6769 if (ACS.isCallbackCall())
6770 return IsCompatiblePrivArgOfDirectCS(ACS);
6771 return false;
6772 };
6773
6774 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6775 UsedAssumedInformation))
6776 return indicatePessimisticFixpoint();
6777
6778 return ChangeStatus::UNCHANGED;
6779 }
6780
6781 /// Given a type to privatize \p PrivType, collect the constituents (which are
6782 /// used) in \p ReplacementTypes.
6783 static void
6784 identifyReplacementTypes(Type *PrivType,
6785 SmallVectorImpl<Type *> &ReplacementTypes) {
6786 // TODO: For now we expand the privatization type to the fullest which can
6787 // lead to dead arguments that need to be removed later.
6788 assert(PrivType && "Expected privatizable type!");
6789
6790 // Traverse the type, extract constituent types on the outermost level.
6791 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6792 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6793 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6794 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6795 ReplacementTypes.append(PrivArrayType->getNumElements(),
6796 PrivArrayType->getElementType());
6797 } else {
6798 ReplacementTypes.push_back(PrivType);
6799 }
6800 }
6801
6802 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6803 /// The values needed are taken from the arguments of \p F starting at
6804 /// position \p ArgNo.
6805 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6806 unsigned ArgNo, Instruction &IP) { 6807 assert(PrivType && "Expected privatizable type!"); 6808 6809 IRBuilder<NoFolder> IRB(&IP); 6810 const DataLayout &DL = F.getParent()->getDataLayout(); 6811 6812 // Traverse the type, build GEPs and stores. 6813 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6814 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6815 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6816 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6817 Value *Ptr = 6818 constructPointer(PointeeTy, PrivType, &Base, 6819 PrivStructLayout->getElementOffset(u), IRB, DL); 6820 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6821 } 6822 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6823 Type *PointeeTy = PrivArrayType->getElementType(); 6824 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6825 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6826 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6827 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6828 u * PointeeTySize, IRB, DL); 6829 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6830 } 6831 } else { 6832 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6833 } 6834 } 6835 6836 /// Extract values from \p Base according to the type \p PrivType at the 6837 /// call position \p ACS. The values are appended to \p ReplacementValues. 6838 void createReplacementValues(Align Alignment, Type *PrivType, 6839 AbstractCallSite ACS, Value *Base, 6840 SmallVectorImpl<Value *> &ReplacementValues) { 6841 assert(Base && "Expected base value!"); 6842 assert(PrivType && "Expected privatizable type!"); 6843 Instruction *IP = ACS.getInstruction(); 6844 6845 IRBuilder<NoFolder> IRB(IP); 6846 const DataLayout &DL = IP->getModule()->getDataLayout(); 6847 6848 Type *PrivPtrType = PrivType->getPointerTo(); 6849 if (Base->getType() != PrivPtrType) 6850 Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6851 Base, PrivPtrType, "", ACS.getInstruction()); 6852 6853 // Traverse the type, build GEPs and loads. 6854 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6855 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6856 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6857 Type *PointeeTy = PrivStructType->getElementType(u); 6858 Value *Ptr = 6859 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6860 PrivStructLayout->getElementOffset(u), IRB, DL); 6861 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6862 L->setAlignment(Alignment); 6863 ReplacementValues.push_back(L); 6864 } 6865 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6866 Type *PointeeTy = PrivArrayType->getElementType(); 6867 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6868 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6869 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6870 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6871 u * PointeeTySize, IRB, DL); 6872 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6873 L->setAlignment(Alignment); 6874 ReplacementValues.push_back(L); 6875 } 6876 } else { 6877 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6878 L->setAlignment(Alignment); 6879 ReplacementValues.push_back(L); 6880 } 6881 } 6882 6883 /// See AbstractAttribute::manifest(...) 
6884 ChangeStatus manifest(Attributor &A) override { 6885 if (!PrivatizableType.hasValue()) 6886 return ChangeStatus::UNCHANGED; 6887 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6888 6889 // Collect all tail calls in the function as we cannot allow new allocas to 6890 // escape into tail recursion. 6891 // TODO: Be smarter about new allocas escaping into tail calls. 6892 SmallVector<CallInst *, 16> TailCalls; 6893 bool UsedAssumedInformation = false; 6894 if (!A.checkForAllInstructions( 6895 [&](Instruction &I) { 6896 CallInst &CI = cast<CallInst>(I); 6897 if (CI.isTailCall()) 6898 TailCalls.push_back(&CI); 6899 return true; 6900 }, 6901 *this, {Instruction::Call}, UsedAssumedInformation)) 6902 return ChangeStatus::UNCHANGED; 6903 6904 Argument *Arg = getAssociatedArgument(); 6905 // Query AAAlign attribute for alignment of associated argument to 6906 // determine the best alignment of loads. 6907 const auto &AlignAA = 6908 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6909 6910 // Callback to repair the associated function. A new alloca is placed at the 6911 // beginning and initialized with the values passed through arguments. The 6912 // new alloca replaces the use of the old pointer argument. 6913 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6914 [=](const Attributor::ArgumentReplacementInfo &ARI, 6915 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6916 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6917 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6918 const DataLayout &DL = IP->getModule()->getDataLayout(); 6919 unsigned AS = DL.getAllocaAddrSpace(); 6920 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS, 6921 Arg->getName() + ".priv", IP); 6922 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6923 ArgIt->getArgNo(), *IP); 6924 6925 if (AI->getType() != Arg->getType()) 6926 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6927 AI, Arg->getType(), "", IP); 6928 Arg->replaceAllUsesWith(AI); 6929 6930 for (CallInst *CI : TailCalls) 6931 CI->setTailCall(false); 6932 }; 6933 6934 // Callback to repair a call site of the associated function. The elements 6935 // of the privatizable type are loaded prior to the call and passed to the 6936 // new function version. 6937 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6938 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6939 AbstractCallSite ACS, 6940 SmallVectorImpl<Value *> &NewArgOperands) { 6941 // When no alignment is specified for the load instruction, 6942 // natural alignment is assumed. 6943 createReplacementValues( 6944 assumeAligned(AlignAA.getAssumedAlign()), 6945 PrivatizableType.getValue(), ACS, 6946 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6947 NewArgOperands); 6948 }; 6949 6950 // Collect the types that will replace the privatizable type in the function 6951 // signature. 6952 SmallVector<Type *, 16> ReplacementTypes; 6953 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6954 6955 // Register a rewrite of the argument. 
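// E.g. (illustration only): privatizing an argument of type
// `%struct.S = type { i32, float }` rewrites `void @foo(%struct.S* %p)`
// into `void @foo(i32, float)`; FnRepairCB above rebuilds the struct in a
// fresh alloca inside the replacement function, and ACSRepairCB loads the
// two elements right before every call site.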
6956 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6957 std::move(FnRepairCB), 6958 std::move(ACSRepairCB))) 6959 return ChangeStatus::CHANGED; 6960 return ChangeStatus::UNCHANGED; 6961 } 6962 6963 /// See AbstractAttribute::trackStatistics() 6964 void trackStatistics() const override { 6965 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6966 } 6967 }; 6968 6969 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6970 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6971 : AAPrivatizablePtrImpl(IRP, A) {} 6972 6973 /// See AbstractAttribute::initialize(...). 6974 virtual void initialize(Attributor &A) override { 6975 // TODO: We can privatize more than arguments. 6976 indicatePessimisticFixpoint(); 6977 } 6978 6979 ChangeStatus updateImpl(Attributor &A) override { 6980 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6981 "updateImpl will not be called"); 6982 } 6983 6984 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6985 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6986 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6987 if (!Obj) { 6988 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6989 return nullptr; 6990 } 6991 6992 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6993 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6994 if (CI->isOne()) 6995 return AI->getAllocatedType(); 6996 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6997 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6998 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6999 if (PrivArgAA.isAssumedPrivatizablePtr()) 7000 return Obj->getType()->getPointerElementType(); 7001 } 7002 7003 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 7004 "alloca nor privatizable argument: " 7005 << *Obj << "!\n"); 7006 return nullptr; 7007 } 7008 7009 /// See AbstractAttribute::trackStatistics() 7010 void trackStatistics() const override { 7011 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 7012 } 7013 }; 7014 7015 struct AAPrivatizablePtrCallSiteArgument final 7016 : public AAPrivatizablePtrFloating { 7017 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 7018 : AAPrivatizablePtrFloating(IRP, A) {} 7019 7020 /// See AbstractAttribute::initialize(...). 7021 void initialize(Attributor &A) override { 7022 if (getIRPosition().hasAttr(Attribute::ByVal)) 7023 indicateOptimisticFixpoint(); 7024 } 7025 7026 /// See AbstractAttribute::updateImpl(...). 
7027 ChangeStatus updateImpl(Attributor &A) override { 7028 PrivatizableType = identifyPrivatizableType(A); 7029 if (!PrivatizableType.hasValue()) 7030 return ChangeStatus::UNCHANGED; 7031 if (!PrivatizableType.getValue()) 7032 return indicatePessimisticFixpoint(); 7033 7034 const IRPosition &IRP = getIRPosition(); 7035 auto &NoCaptureAA = 7036 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 7037 if (!NoCaptureAA.isAssumedNoCapture()) { 7038 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 7039 return indicatePessimisticFixpoint(); 7040 } 7041 7042 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 7043 if (!NoAliasAA.isAssumedNoAlias()) { 7044 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 7045 return indicatePessimisticFixpoint(); 7046 } 7047 7048 bool IsKnown; 7049 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) { 7050 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 7051 return indicatePessimisticFixpoint(); 7052 } 7053 7054 return ChangeStatus::UNCHANGED; 7055 } 7056 7057 /// See AbstractAttribute::trackStatistics() 7058 void trackStatistics() const override { 7059 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 7060 } 7061 }; 7062 7063 struct AAPrivatizablePtrCallSiteReturned final 7064 : public AAPrivatizablePtrFloating { 7065 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 7066 : AAPrivatizablePtrFloating(IRP, A) {} 7067 7068 /// See AbstractAttribute::initialize(...). 7069 void initialize(Attributor &A) override { 7070 // TODO: We can privatize more than arguments. 7071 indicatePessimisticFixpoint(); 7072 } 7073 7074 /// See AbstractAttribute::trackStatistics() 7075 void trackStatistics() const override { 7076 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 7077 } 7078 }; 7079 7080 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 7081 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 7082 : AAPrivatizablePtrFloating(IRP, A) {} 7083 7084 /// See AbstractAttribute::initialize(...). 7085 void initialize(Attributor &A) override { 7086 // TODO: We can privatize more than arguments. 7087 indicatePessimisticFixpoint(); 7088 } 7089 7090 /// See AbstractAttribute::trackStatistics() 7091 void trackStatistics() const override { 7092 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 7093 } 7094 }; 7095 } // namespace 7096 7097 /// -------------------- Memory Behavior Attributes ---------------------------- 7098 /// Includes read-none, read-only, and write-only. 7099 /// ---------------------------------------------------------------------------- 7100 namespace { 7101 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 7102 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 7103 : AAMemoryBehavior(IRP, A) {} 7104 7105 /// See AbstractAttribute::initialize(...). 7106 void initialize(Attributor &A) override { 7107 intersectAssumedBits(BEST_STATE); 7108 getKnownStateFromValue(getIRPosition(), getState()); 7109 AAMemoryBehavior::initialize(A); 7110 } 7111 7112 /// Return the memory behavior information encoded in the IR for \p IRP. 
7113 static void getKnownStateFromValue(const IRPosition &IRP, 7114 BitIntegerState &State, 7115 bool IgnoreSubsumingPositions = false) { 7116 SmallVector<Attribute, 2> Attrs; 7117 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7118 for (const Attribute &Attr : Attrs) { 7119 switch (Attr.getKindAsEnum()) { 7120 case Attribute::ReadNone: 7121 State.addKnownBits(NO_ACCESSES); 7122 break; 7123 case Attribute::ReadOnly: 7124 State.addKnownBits(NO_WRITES); 7125 break; 7126 case Attribute::WriteOnly: 7127 State.addKnownBits(NO_READS); 7128 break; 7129 default: 7130 llvm_unreachable("Unexpected attribute!"); 7131 } 7132 } 7133 7134 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 7135 if (!I->mayReadFromMemory()) 7136 State.addKnownBits(NO_READS); 7137 if (!I->mayWriteToMemory()) 7138 State.addKnownBits(NO_WRITES); 7139 } 7140 } 7141 7142 /// See AbstractAttribute::getDeducedAttributes(...). 7143 void getDeducedAttributes(LLVMContext &Ctx, 7144 SmallVectorImpl<Attribute> &Attrs) const override { 7145 assert(Attrs.size() == 0); 7146 if (isAssumedReadNone()) 7147 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7148 else if (isAssumedReadOnly()) 7149 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 7150 else if (isAssumedWriteOnly()) 7151 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 7152 assert(Attrs.size() <= 1); 7153 } 7154 7155 /// See AbstractAttribute::manifest(...). 7156 ChangeStatus manifest(Attributor &A) override { 7157 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 7158 return ChangeStatus::UNCHANGED; 7159 7160 const IRPosition &IRP = getIRPosition(); 7161 7162 // Check if we would improve the existing attributes first. 7163 SmallVector<Attribute, 4> DeducedAttrs; 7164 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7165 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7166 return IRP.hasAttr(Attr.getKindAsEnum(), 7167 /* IgnoreSubsumingPositions */ true); 7168 })) 7169 return ChangeStatus::UNCHANGED; 7170 7171 // Clear existing attributes. 7172 IRP.removeAttrs(AttrKinds); 7173 7174 // Use the generic manifest method. 7175 return IRAttribute::manifest(A); 7176 } 7177 7178 /// See AbstractState::getAsStr(). 7179 const std::string getAsStr() const override { 7180 if (isAssumedReadNone()) 7181 return "readnone"; 7182 if (isAssumedReadOnly()) 7183 return "readonly"; 7184 if (isAssumedWriteOnly()) 7185 return "writeonly"; 7186 return "may-read/write"; 7187 } 7188 7189 /// The set of IR attributes AAMemoryBehavior deals with. 7190 static const Attribute::AttrKind AttrKinds[3]; 7191 }; 7192 7193 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7194 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7195 7196 /// Memory behavior attribute for a floating value. 7197 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7198 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7199 : AAMemoryBehaviorImpl(IRP, A) {} 7200 7201 /// See AbstractAttribute::updateImpl(...). 
7202 ChangeStatus updateImpl(Attributor &A) override; 7203 7204 /// See AbstractAttribute::trackStatistics() 7205 void trackStatistics() const override { 7206 if (isAssumedReadNone()) 7207 STATS_DECLTRACK_FLOATING_ATTR(readnone) 7208 else if (isAssumedReadOnly()) 7209 STATS_DECLTRACK_FLOATING_ATTR(readonly) 7210 else if (isAssumedWriteOnly()) 7211 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 7212 } 7213 7214 private: 7215 /// Return true if users of \p UserI might access the underlying 7216 /// variable/location described by \p U and should therefore be analyzed. 7217 bool followUsersOfUseIn(Attributor &A, const Use &U, 7218 const Instruction *UserI); 7219 7220 /// Update the state according to the effect of use \p U in \p UserI. 7221 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI); 7222 }; 7223 7224 /// Memory behavior attribute for function argument. 7225 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 7226 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 7227 : AAMemoryBehaviorFloating(IRP, A) {} 7228 7229 /// See AbstractAttribute::initialize(...). 7230 void initialize(Attributor &A) override { 7231 intersectAssumedBits(BEST_STATE); 7232 const IRPosition &IRP = getIRPosition(); 7233 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 7234 // can query it when we use has/getAttr. That would allow us to reuse the 7235 // initialize of the base class here. 7236 bool HasByVal = 7237 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 7238 getKnownStateFromValue(IRP, getState(), 7239 /* IgnoreSubsumingPositions */ HasByVal); 7240 7241 // Initialize the use vector with all direct uses of the associated value. 7242 Argument *Arg = getAssociatedArgument(); 7243 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) 7244 indicatePessimisticFixpoint(); 7245 } 7246 7247 ChangeStatus manifest(Attributor &A) override { 7248 // TODO: Pointer arguments are not supported on vectors of pointers yet. 7249 if (!getAssociatedValue().getType()->isPointerTy()) 7250 return ChangeStatus::UNCHANGED; 7251 7252 // TODO: From readattrs.ll: "inalloca parameters are always 7253 // considered written" 7254 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 7255 removeKnownBits(NO_WRITES); 7256 removeAssumedBits(NO_WRITES); 7257 } 7258 return AAMemoryBehaviorFloating::manifest(A); 7259 } 7260 7261 /// See AbstractAttribute::trackStatistics() 7262 void trackStatistics() const override { 7263 if (isAssumedReadNone()) 7264 STATS_DECLTRACK_ARG_ATTR(readnone) 7265 else if (isAssumedReadOnly()) 7266 STATS_DECLTRACK_ARG_ATTR(readonly) 7267 else if (isAssumedWriteOnly()) 7268 STATS_DECLTRACK_ARG_ATTR(writeonly) 7269 } 7270 }; 7271 7272 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 7273 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 7274 : AAMemoryBehaviorArgument(IRP, A) {} 7275 7276 /// See AbstractAttribute::initialize(...). 7277 void initialize(Attributor &A) override { 7278 // If we don't have an associated attribute this is either a variadic call 7279 // or an indirect call, either way, nothing to do here. 
7280 Argument *Arg = getAssociatedArgument();
7281 if (!Arg) {
7282 indicatePessimisticFixpoint();
7283 return;
7284 }
7285 if (Arg->hasByValAttr()) {
7286 addKnownBits(NO_WRITES);
7287 removeKnownBits(NO_READS);
7288 removeAssumedBits(NO_READS);
7289 }
7290 AAMemoryBehaviorArgument::initialize(A);
7291 if (getAssociatedFunction()->isDeclaration())
7292 indicatePessimisticFixpoint();
7293 }
7294
7295 /// See AbstractAttribute::updateImpl(...).
7296 ChangeStatus updateImpl(Attributor &A) override {
7297 // TODO: Once we have call site specific value information we can provide
7298 // call site specific liveness information and then it makes sense to
7299 // specialize attributes for call site arguments instead of
7300 // redirecting requests to the callee argument.
7301 Argument *Arg = getAssociatedArgument();
7302 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7303 auto &ArgAA =
7304 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7305 return clampStateAndIndicateChange(getState(), ArgAA.getState());
7306 }
7307
7308 /// See AbstractAttribute::trackStatistics()
7309 void trackStatistics() const override {
7310 if (isAssumedReadNone())
7311 STATS_DECLTRACK_CSARG_ATTR(readnone)
7312 else if (isAssumedReadOnly())
7313 STATS_DECLTRACK_CSARG_ATTR(readonly)
7314 else if (isAssumedWriteOnly())
7315 STATS_DECLTRACK_CSARG_ATTR(writeonly)
7316 }
7317 };
7318
7319 /// Memory behavior attribute for a call site return position.
7320 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7321 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7322 : AAMemoryBehaviorFloating(IRP, A) {}
7323
7324 /// See AbstractAttribute::initialize(...).
7325 void initialize(Attributor &A) override {
7326 AAMemoryBehaviorImpl::initialize(A);
7327 Function *F = getAssociatedFunction();
7328 if (!F || F->isDeclaration())
7329 indicatePessimisticFixpoint();
7330 }
7331
7332 /// See AbstractAttribute::manifest(...).
7333 ChangeStatus manifest(Attributor &A) override {
7334 // We do not annotate returned values.
7335 return ChangeStatus::UNCHANGED;
7336 }
7337
7338 /// See AbstractAttribute::trackStatistics()
7339 void trackStatistics() const override {}
7340 };
7341
7342 /// An AA to represent the memory behavior function attributes.
7343 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7344 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7345 : AAMemoryBehaviorImpl(IRP, A) {}
7346
7347 /// See AbstractAttribute::updateImpl(Attributor &A).
7348 virtual ChangeStatus updateImpl(Attributor &A) override;
7349
7350 /// See AbstractAttribute::manifest(...).
7351 ChangeStatus manifest(Attributor &A) override {
7352 Function &F = cast<Function>(getAnchorValue());
7353 if (isAssumedReadNone()) {
7354 F.removeFnAttr(Attribute::ArgMemOnly);
7355 F.removeFnAttr(Attribute::InaccessibleMemOnly);
7356 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7357 }
7358 return AAMemoryBehaviorImpl::manifest(A);
7359 }
7360
7361 /// See AbstractAttribute::trackStatistics()
7362 void trackStatistics() const override {
7363 if (isAssumedReadNone())
7364 STATS_DECLTRACK_FN_ATTR(readnone)
7365 else if (isAssumedReadOnly())
7366 STATS_DECLTRACK_FN_ATTR(readonly)
7367 else if (isAssumedWriteOnly())
7368 STATS_DECLTRACK_FN_ATTR(writeonly)
7369 }
7370 };
7371
7372 /// AAMemoryBehavior attribute for call sites.
7373 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7374 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7375 : AAMemoryBehaviorImpl(IRP, A) {}
7376
7377 /// See AbstractAttribute::initialize(...).
7378 void initialize(Attributor &A) override {
7379 AAMemoryBehaviorImpl::initialize(A);
7380 Function *F = getAssociatedFunction();
7381 if (!F || F->isDeclaration())
7382 indicatePessimisticFixpoint();
7383 }
7384
7385 /// See AbstractAttribute::updateImpl(...).
7386 ChangeStatus updateImpl(Attributor &A) override {
7387 // TODO: Once we have call site specific value information we can provide
7388 // call site specific liveness information and then it makes sense to
7389 // specialize attributes for call site arguments instead of
7390 // redirecting requests to the callee argument.
7391 Function *F = getAssociatedFunction();
7392 const IRPosition &FnPos = IRPosition::function(*F);
7393 auto &FnAA =
7394 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7395 return clampStateAndIndicateChange(getState(), FnAA.getState());
7396 }
7397
7398 /// See AbstractAttribute::trackStatistics()
7399 void trackStatistics() const override {
7400 if (isAssumedReadNone())
7401 STATS_DECLTRACK_CS_ATTR(readnone)
7402 else if (isAssumedReadOnly())
7403 STATS_DECLTRACK_CS_ATTR(readonly)
7404 else if (isAssumedWriteOnly())
7405 STATS_DECLTRACK_CS_ATTR(writeonly)
7406 }
7407 };
7408
7409 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7410
7411 // The current assumed state used to determine a change.
7412 auto AssumedState = getAssumed();
7413
7414 auto CheckRWInst = [&](Instruction &I) {
7415 // If the instruction has its own memory behavior state, use it to restrict
7416 // the local state. No further analysis is required as the other memory
7417 // state is as optimistic as it gets.
7418 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7419 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7420 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7421 intersectAssumedBits(MemBehaviorAA.getAssumed());
7422 return !isAtFixpoint();
7423 }
7424
7425 // Remove access kind modifiers if necessary.
7426 if (I.mayReadFromMemory())
7427 removeAssumedBits(NO_READS);
7428 if (I.mayWriteToMemory())
7429 removeAssumedBits(NO_WRITES);
7430 return !isAtFixpoint();
7431 };
7432
7433 bool UsedAssumedInformation = false;
7434 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7435 UsedAssumedInformation))
7436 return indicatePessimisticFixpoint();
7437
7438 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7439 : ChangeStatus::UNCHANGED;
7440 }
7441
7442 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7443
7444 const IRPosition &IRP = getIRPosition();
7445 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7446 AAMemoryBehavior::StateType &S = getState();
7447
7448 // First, check the function scope. We take the known information and we avoid
7449 // work if the assumed information implies the current assumed information for
7450 // this attribute. This is valid for all but byval arguments.
7451 Argument *Arg = IRP.getAssociatedArgument();
7452 AAMemoryBehavior::base_t FnMemAssumedState =
7453 AAMemoryBehavior::StateType::getWorstState();
7454 if (!Arg || !Arg->hasByValAttr()) {
7455 const auto &FnMemAA =
7456 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7457 FnMemAssumedState = FnMemAA.getAssumed();
7458 S.addKnownBits(FnMemAA.getKnown());
7459 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7460 return ChangeStatus::UNCHANGED;
7461 }
7462
7463 // The current assumed state used to determine a change.
7464 auto AssumedState = S.getAssumed();
7465
7466 // Make sure the value is not captured (except through "return"); if
7467 // it is, any information derived would be irrelevant anyway as we cannot
7468 // check the potential aliases introduced by the capture. However, no need
7469 // to fall back to anything less optimistic than the function state.
7470 const auto &ArgNoCaptureAA =
7471 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7472 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7473 S.intersectAssumedBits(FnMemAssumedState);
7474 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7475 : ChangeStatus::UNCHANGED;
7476 }
7477
7478 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7479 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7480 Instruction *UserI = cast<Instruction>(U.getUser());
7481 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7482 << " \n");
7483
7484 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7485 if (UserI->isDroppable())
7486 return true;
7487
7488 // Check if the users of UserI should also be visited.
7489 Follow = followUsersOfUseIn(A, U, UserI);
7490
7491 // If UserI might touch memory we analyze the use in detail.
7492 if (UserI->mayReadOrWriteMemory())
7493 analyzeUseIn(A, U, UserI);
7494
7495 return !isAtFixpoint();
7496 };
7497
7498 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7499 return indicatePessimisticFixpoint();
7500
7501 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7502 : ChangeStatus::UNCHANGED;
7503 }
7504
7505 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7506 const Instruction *UserI) {
7507 // The loaded value is unrelated to the pointer argument, no need to
7508 // follow the users of the load.
7509 if (isa<LoadInst>(UserI))
7510 return false;
7511
7512 // By default we follow all uses assuming UserI might leak information on U;
7513 // we have special handling for call site operands though.
7514 const auto *CB = dyn_cast<CallBase>(UserI);
7515 if (!CB || !CB->isArgOperand(&U))
7516 return true;
7517
7518 // If the use is a call argument known not to be captured, the users of
7519 // the call do not need to be visited because they have to be unrelated to
7520 // the input. Note that this check is not trivial even though we disallow
7521 // general capturing of the underlying argument. The reason is that the
7522 // call might capture the argument "through return", which we allow and for
7523 // which we need to check call users.
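// E.g. (sketch): for `call void @use(i8* nocapture %p)` the users of the
// call cannot be related to %p and are not followed; without nocapture the
// callee could return %p and the call's users would have to be visited.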
7524 if (U.get()->getType()->isPointerTy()) { 7525 unsigned ArgNo = CB->getArgOperandNo(&U); 7526 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 7527 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); 7528 return !ArgNoCaptureAA.isAssumedNoCapture(); 7529 } 7530 7531 return true; 7532 } 7533 7534 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, 7535 const Instruction *UserI) { 7536 assert(UserI->mayReadOrWriteMemory()); 7537 7538 switch (UserI->getOpcode()) { 7539 default: 7540 // TODO: Handle all atomics and other side-effect operations we know of. 7541 break; 7542 case Instruction::Load: 7543 // Loads cause the NO_READS property to disappear. 7544 removeAssumedBits(NO_READS); 7545 return; 7546 7547 case Instruction::Store: 7548 // Stores cause the NO_WRITES property to disappear if the use is the 7549 // pointer operand. Note that while capturing was taken care of somewhere 7550 // else we need to deal with stores of the value that is not looked through. 7551 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) 7552 removeAssumedBits(NO_WRITES); 7553 else 7554 indicatePessimisticFixpoint(); 7555 return; 7556 7557 case Instruction::Call: 7558 case Instruction::CallBr: 7559 case Instruction::Invoke: { 7560 // For call sites we look at the argument memory behavior attribute (this 7561 // could be recursive!) in order to restrict our own state. 7562 const auto *CB = cast<CallBase>(UserI); 7563 7564 // Give up on operand bundles. 7565 if (CB->isBundleOperand(&U)) { 7566 indicatePessimisticFixpoint(); 7567 return; 7568 } 7569 7570 // Calling a function does read the function pointer, maybe write it if the 7571 // function is self-modifying. 7572 if (CB->isCallee(&U)) { 7573 removeAssumedBits(NO_READS); 7574 break; 7575 } 7576 7577 // Adjust the possible access behavior based on the information on the 7578 // argument. 7579 IRPosition Pos; 7580 if (U.get()->getType()->isPointerTy()) 7581 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 7582 else 7583 Pos = IRPosition::callsite_function(*CB); 7584 const auto &MemBehaviorAA = 7585 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); 7586 // "assumed" has at most the same bits as the MemBehaviorAA assumed 7587 // and at least "known". 7588 intersectAssumedBits(MemBehaviorAA.getAssumed()); 7589 return; 7590 } 7591 }; 7592 7593 // Generally, look at the "may-properties" and adjust the assumed state if we 7594 // did not trigger special handling before. 
7595 if (UserI->mayReadFromMemory())
7596 removeAssumedBits(NO_READS);
7597 if (UserI->mayWriteToMemory())
7598 removeAssumedBits(NO_WRITES);
7599 }
7600 } // namespace
7601
7602 /// -------------------- Memory Locations Attributes ---------------------------
7603 /// Includes read-none, argmemonly, inaccessiblememonly,
7604 /// inaccessiblemem_or_argmemonly
7605 /// ----------------------------------------------------------------------------
7606
7607 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7608 AAMemoryLocation::MemoryLocationsKind MLK) {
7609 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7610 return "all memory";
7611 if (MLK == AAMemoryLocation::NO_LOCATIONS)
7612 return "no memory";
7613 std::string S = "memory:";
7614 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7615 S += "stack,";
7616 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7617 S += "constant,";
7618 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7619 S += "internal global,";
7620 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7621 S += "external global,";
7622 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7623 S += "argument,";
7624 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7625 S += "inaccessible,";
7626 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7627 S += "malloced,";
7628 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7629 S += "unknown,";
7630 S.pop_back();
7631 return S;
7632 }
7633
7634 namespace {
7635 struct AAMemoryLocationImpl : public AAMemoryLocation {
7636
7637 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7638 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7639 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7640 AccessKind2Accesses[u] = nullptr;
7641 }
7642
7643 ~AAMemoryLocationImpl() {
7644 // The AccessSets are allocated via a BumpPtrAllocator, so we call
7645 // the destructors manually.
7646 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7647 if (AccessKind2Accesses[u])
7648 AccessKind2Accesses[u]->~AccessSet();
7649 }
7650
7651 /// See AbstractAttribute::initialize(...).
7652 void initialize(Attributor &A) override {
7653 intersectAssumedBits(BEST_STATE);
7654 getKnownStateFromValue(A, getIRPosition(), getState());
7655 AAMemoryLocation::initialize(A);
7656 }
7657
7658 /// Return the memory location information encoded in the IR for \p IRP.
7659 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7660 BitIntegerState &State,
7661 bool IgnoreSubsumingPositions = false) {
7662 // For internal functions we ignore `argmemonly` and
7663 // `inaccessiblemem_or_argmemonly` as we might break them via
7664 // interprocedural constant propagation. It is unclear if this is the best
7665 // way but it is unlikely this will cause real performance problems. If we
7666 // are deriving attributes for the anchor function we even remove the
7667 // attribute in addition to ignoring it.
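// E.g. (sketch): if the address of global @g is propagated into a pointer
// argument of an internal function, an access that `argmemonly` accounted
// to argument memory now touches global memory, so trusting the attribute
// would be unsound.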
7668 bool UseArgMemOnly = true; 7669 Function *AnchorFn = IRP.getAnchorScope(); 7670 if (AnchorFn && A.isRunOn(*AnchorFn)) 7671 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7672 7673 SmallVector<Attribute, 2> Attrs; 7674 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7675 for (const Attribute &Attr : Attrs) { 7676 switch (Attr.getKindAsEnum()) { 7677 case Attribute::ReadNone: 7678 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7679 break; 7680 case Attribute::InaccessibleMemOnly: 7681 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7682 break; 7683 case Attribute::ArgMemOnly: 7684 if (UseArgMemOnly) 7685 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7686 else 7687 IRP.removeAttrs({Attribute::ArgMemOnly}); 7688 break; 7689 case Attribute::InaccessibleMemOrArgMemOnly: 7690 if (UseArgMemOnly) 7691 State.addKnownBits(inverseLocation( 7692 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7693 else 7694 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7695 break; 7696 default: 7697 llvm_unreachable("Unexpected attribute!"); 7698 } 7699 } 7700 } 7701 7702 /// See AbstractAttribute::getDeducedAttributes(...). 7703 void getDeducedAttributes(LLVMContext &Ctx, 7704 SmallVectorImpl<Attribute> &Attrs) const override { 7705 assert(Attrs.size() == 0); 7706 if (isAssumedReadNone()) { 7707 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7708 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7709 if (isAssumedInaccessibleMemOnly()) 7710 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7711 else if (isAssumedArgMemOnly()) 7712 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7713 else if (isAssumedInaccessibleOrArgMemOnly()) 7714 Attrs.push_back( 7715 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7716 } 7717 assert(Attrs.size() <= 1); 7718 } 7719 7720 /// See AbstractAttribute::manifest(...). 7721 ChangeStatus manifest(Attributor &A) override { 7722 const IRPosition &IRP = getIRPosition(); 7723 7724 // Check if we would improve the existing attributes first. 7725 SmallVector<Attribute, 4> DeducedAttrs; 7726 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7727 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7728 return IRP.hasAttr(Attr.getKindAsEnum(), 7729 /* IgnoreSubsumingPositions */ true); 7730 })) 7731 return ChangeStatus::UNCHANGED; 7732 7733 // Clear existing attributes. 7734 IRP.removeAttrs(AttrKinds); 7735 if (isAssumedReadNone()) 7736 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7737 7738 // Use the generic manifest method. 7739 return IRAttribute::manifest(A); 7740 } 7741 7742 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
7743   bool checkForAllAccessesToMemoryKind(
7744       function_ref<bool(const Instruction *, const Value *, AccessKind,
7745                         MemoryLocationsKind)>
7746           Pred,
7747       MemoryLocationsKind RequestedMLK) const override {
7748     if (!isValidState())
7749       return false;
7750
7751     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7752     if (AssumedMLK == NO_LOCATIONS)
7753       return true;
7754
7755     unsigned Idx = 0;
7756     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7757          CurMLK *= 2, ++Idx) {
7758       if (CurMLK & RequestedMLK)
7759         continue;
7760
7761       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7762         for (const AccessInfo &AI : *Accesses)
7763           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7764             return false;
7765     }
7766
7767     return true;
7768   }
7769
7770   ChangeStatus indicatePessimisticFixpoint() override {
7771     // If we give up and indicate a pessimistic fixpoint this instruction will
7772     // become an access for all potential access kinds.
7773     // TODO: Add pointers for argmemonly and globals to improve the results of
7774     //       checkForAllAccessesToMemoryKind.
7775     bool Changed = false;
7776     MemoryLocationsKind KnownMLK = getKnown();
7777     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7778     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7779       if (!(CurMLK & KnownMLK))
7780         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7781                                   getAccessKindFromInst(I));
7782     return AAMemoryLocation::indicatePessimisticFixpoint();
7783   }
7784
7785 protected:
7786   /// Helper struct to tie together an instruction that has a read or write
7787   /// effect with the pointer it accesses (if any).
7788   struct AccessInfo {
7789
7790     /// The instruction that caused the access.
7791     const Instruction *I;
7792
7793     /// The base pointer that is accessed, or null if unknown.
7794     const Value *Ptr;
7795
7796     /// The kind of access (read/write/read+write).
7797     AccessKind Kind;
7798
7799     bool operator==(const AccessInfo &RHS) const {
7800       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7801     }
7802     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7803       if (LHS.I != RHS.I)
7804         return LHS.I < RHS.I;
7805       if (LHS.Ptr != RHS.Ptr)
7806         return LHS.Ptr < RHS.Ptr;
7807       if (LHS.Kind != RHS.Kind)
7808         return LHS.Kind < RHS.Kind;
7809       return false;
7810     }
7811   };
7812
7813   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7814   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7815   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7816   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7817
7818   /// Categorize the pointer arguments of \p CB that might access memory in
7819   /// \p AccessedLocs and update the state and access map accordingly.
7820   void
7821   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7822                                      AAMemoryLocation::StateType &AccessedLocs,
7823                                      bool &Changed);
7824
7825   /// Return the kind(s) of location that may be accessed by \p V.
7826   AAMemoryLocation::MemoryLocationsKind
7827   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7828
7829   /// Return the access kind as determined by \p I.
7830   AccessKind getAccessKindFromInst(const Instruction *I) {
7831     AccessKind AK = READ_WRITE;
7832     if (I) {
7833       AK = I->mayReadFromMemory() ? READ : NONE;
7834       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7835     }
7836     return AK;
7837   }
7838
7839   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7840   /// an access of kind \p AK to a \p MLK memory location with the access
7841   /// pointer \p Ptr.
7842   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7843                                  MemoryLocationsKind MLK, const Instruction *I,
7844                                  const Value *Ptr, bool &Changed,
7845                                  AccessKind AK = READ_WRITE) {
7846
7847     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7848     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7849     if (!Accesses)
7850       Accesses = new (Allocator) AccessSet();
7851     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7852     State.removeAssumedBits(MLK);
7853   }
7854
7855   /// Determine the underlying location kinds for \p Ptr, e.g., globals or
7856   /// arguments, and update the state and access map accordingly.
7857   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7858                           AAMemoryLocation::StateType &State, bool &Changed);
7859
7860   /// Used to allocate access sets.
7861   BumpPtrAllocator &Allocator;
7862
7863   /// The set of IR attributes AAMemoryLocation deals with.
7864   static const Attribute::AttrKind AttrKinds[4];
7865 };
7866
7867 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7868     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7869     Attribute::InaccessibleMemOrArgMemOnly};
7870
7871 void AAMemoryLocationImpl::categorizePtrValue(
7872     Attributor &A, const Instruction &I, const Value &Ptr,
7873     AAMemoryLocation::StateType &State, bool &Changed) {
7874   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7875                     << Ptr << " ["
7876                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7877
7878   SmallVector<Value *, 8> Objects;
7879   bool UsedAssumedInformation = false;
7880   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7881                                        UsedAssumedInformation,
7882                                        /* Intraprocedural */ true)) {
7883     LLVM_DEBUG(
7884         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7885     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7886                               getAccessKindFromInst(&I));
7887     return;
7888   }
7889
7890   for (Value *Obj : Objects) {
7891     // TODO: recognize the TBAA used for constant accesses.
7892     MemoryLocationsKind MLK = NO_LOCATIONS;
7893     if (isa<UndefValue>(Obj))
7894       continue;
7895     if (isa<Argument>(Obj)) {
7896       // TODO: For now we do not treat byval arguments as local copies performed
7897       //       on the call edge, though, we should. To make that happen we need
7898       //       to teach various passes, e.g., DSE, about the copy effect of a
7899       //       byval. That would also allow us to mark functions only accessing
7900       //       byval arguments as readnone again; arguably their accesses have
7901       //       no effect outside of the function, like accesses to allocas.
7902       MLK = NO_ARGUMENT_MEM;
7903     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7904       // Reading constant memory is not treated as a read "effect" by the
7905       // function attr pass, so we do not either. Constants defined by TBAA are
7906       // similar. (We know we do not write it because it is constant.)
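      // E.g., a load whose underlying object is
      //   @.str = private constant [4 x i8] c"abc\00"
      // is skipped below and does not clear any assumed location bits.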
7907       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7908         if (GVar->isConstant())
7909           continue;
7910
7911       if (GV->hasLocalLinkage())
7912         MLK = NO_GLOBAL_INTERNAL_MEM;
7913       else
7914         MLK = NO_GLOBAL_EXTERNAL_MEM;
7915     } else if (isa<ConstantPointerNull>(Obj) &&
7916                !NullPointerIsDefined(getAssociatedFunction(),
7917                                      Ptr.getType()->getPointerAddressSpace())) {
7918       continue;
7919     } else if (isa<AllocaInst>(Obj)) {
7920       MLK = NO_LOCAL_MEM;
7921     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7922       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7923           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7924       if (NoAliasAA.isAssumedNoAlias())
7925         MLK = NO_MALLOCED_MEM;
7926       else
7927         MLK = NO_UNKOWN_MEM;
7928     } else {
7929       MLK = NO_UNKOWN_MEM;
7930     }
7931
7932     assert(MLK != NO_LOCATIONS && "No location specified!");
7933     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7934                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7935                       << "\n");
7936     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7937                               getAccessKindFromInst(&I));
7938   }
7939
7940   LLVM_DEBUG(
7941       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7942              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7943 }
7944
7945 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7946     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7947     bool &Changed) {
7948   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7949
7950     // Skip non-pointer arguments.
7951     const Value *ArgOp = CB.getArgOperand(ArgNo);
7952     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7953       continue;
7954
7955     // Skip readnone arguments.
7956     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7957     const auto &ArgOpMemLocationAA =
7958         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7959
7960     if (ArgOpMemLocationAA.isAssumedReadNone())
7961       continue;
7962
7963     // Categorize potentially accessed pointer arguments as if there were an
7964     // access instruction with them as the pointer operand.
7965     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7966   }
7967 }
7968
7969 AAMemoryLocation::MemoryLocationsKind
7970 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7971                                                   bool &Changed) {
7972   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7973                     << I << "\n");
7974
7975   AAMemoryLocation::StateType AccessedLocs;
7976   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7977
7978   if (auto *CB = dyn_cast<CallBase>(&I)) {
7979
7980     // First check if we assume any accessed memory is visible at all.
7981     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7982         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7983     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7984                       << " [" << CBMemLocationAA << "]\n");
7985
7986     if (CBMemLocationAA.isAssumedReadNone())
7987       return NO_LOCATIONS;
7988
7989     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7990       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7991                                 Changed, getAccessKindFromInst(&I));
7992       return AccessedLocs.getAssumed();
7993     }
7994
7995     uint32_t CBAssumedNotAccessedLocs =
7996         CBMemLocationAA.getAssumedNotAccessedLocation();
7997
7998     // Set the argmemonly and global bits as we handle them separately below.
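    // For illustration: NO_GLOBAL_MEM is a composite mask covering both
    // NO_GLOBAL_INTERNAL_MEM and NO_GLOBAL_EXTERNAL_MEM, so global memory is
    // excluded from the generic per-bit loop below and handled afterwards by
    // dedicated logic that queries the callee for its individual global
    // accesses.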
7999     uint32_t CBAssumedNotAccessedLocsNoArgMem =
8000         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
8001
8002     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
8003       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
8004         continue;
8005       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
8006                                 getAccessKindFromInst(&I));
8007     }
8008
8009     // Now handle global memory if it might be accessed. This is slightly tricky
8010     // as NO_GLOBAL_MEM has multiple bits set.
8011     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
8012     if (HasGlobalAccesses) {
8013       auto AccessPred = [&](const Instruction *, const Value *Ptr,
8014                             AccessKind Kind, MemoryLocationsKind MLK) {
8015         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
8016                                   getAccessKindFromInst(&I));
8017         return true;
8018       };
8019       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
8020               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
8021         return AccessedLocs.getWorstState();
8022     }
8023
8024     LLVM_DEBUG(
8025         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
8026                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8027
8028     // Now handle argument memory if it might be accessed.
8029     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
8030     if (HasArgAccesses)
8031       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
8032
8033     LLVM_DEBUG(
8034         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
8035                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
8036
8037     return AccessedLocs.getAssumed();
8038   }
8039
8040   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
8041     LLVM_DEBUG(
8042         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
8043                << I << " [" << *Ptr << "]\n");
8044     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
8045     return AccessedLocs.getAssumed();
8046   }
8047
8048   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
8049                     << I << "\n");
8050   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
8051                             getAccessKindFromInst(&I));
8052   return AccessedLocs.getAssumed();
8053 }
8054
8055 /// An AA to represent the memory location function attributes.
8056 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
8057   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
8058       : AAMemoryLocationImpl(IRP, A) {}
8059
8060   /// See AbstractAttribute::updateImpl(Attributor &A).
8061   ChangeStatus updateImpl(Attributor &A) override {
8062
8063     const auto &MemBehaviorAA =
8064         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
8065     if (MemBehaviorAA.isAssumedReadNone()) {
8066       if (MemBehaviorAA.isKnownReadNone())
8067         return indicateOptimisticFixpoint();
8068       assert(isAssumedReadNone() &&
8069              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
8070       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
8071       return ChangeStatus::UNCHANGED;
8072     }
8073
8074     // The current assumed state used to determine a change.
8075     auto AssumedState = getAssumed();
8076     bool Changed = false;
8077
8078     auto CheckRWInst = [&](Instruction &I) {
8079       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
8080       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
8081                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
8082       removeAssumedBits(inverseLocation(MLK, false, false));
8083       // Stop once only the valid bit is set in the *not assumed location*,
8084       // i.e., once we no longer exclude any memory locations in the state.
8085       return getAssumedNotAccessedLocation() != VALID_STATE;
8086     };
8087
8088     bool UsedAssumedInformation = false;
8089     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
8090                                             UsedAssumedInformation))
8091       return indicatePessimisticFixpoint();
8092
8093     Changed |= AssumedState != getAssumed();
8094     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8095   }
8096
8097   /// See AbstractAttribute::trackStatistics()
8098   void trackStatistics() const override {
8099     if (isAssumedReadNone())
8100       STATS_DECLTRACK_FN_ATTR(readnone)
8101     else if (isAssumedArgMemOnly())
8102       STATS_DECLTRACK_FN_ATTR(argmemonly)
8103     else if (isAssumedInaccessibleMemOnly())
8104       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8105     else if (isAssumedInaccessibleOrArgMemOnly())
8106       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8107   }
8108 };
8109
8110 /// AAMemoryLocation attribute for call sites.
8111 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8112   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8113       : AAMemoryLocationImpl(IRP, A) {}
8114
8115   /// See AbstractAttribute::initialize(...).
8116   void initialize(Attributor &A) override {
8117     AAMemoryLocationImpl::initialize(A);
8118     Function *F = getAssociatedFunction();
8119     if (!F || F->isDeclaration())
8120       indicatePessimisticFixpoint();
8121   }
8122
8123   /// See AbstractAttribute::updateImpl(...).
8124   ChangeStatus updateImpl(Attributor &A) override {
8125     // TODO: Once we have call site specific value information we can provide
8126     //       call site specific liveness information and then it makes
8127     //       sense to specialize attributes for call site arguments instead of
8128     //       redirecting requests to the callee argument.
8129     Function *F = getAssociatedFunction();
8130     const IRPosition &FnPos = IRPosition::function(*F);
8131     auto &FnAA =
8132         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8133     bool Changed = false;
8134     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8135                           AccessKind Kind, MemoryLocationsKind MLK) {
8136       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8137                                 getAccessKindFromInst(I));
8138       return true;
8139     };
8140     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8141       return indicatePessimisticFixpoint();
8142     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8143   }
8144
8145   /// See AbstractAttribute::trackStatistics()
8146   void trackStatistics() const override {
8147     if (isAssumedReadNone())
8148       STATS_DECLTRACK_CS_ATTR(readnone)
8149   }
8150 };
8151 } // namespace
8152
8153 /// ------------------ Value Constant Range Attribute -------------------------
8154
8155 namespace {
8156 struct AAValueConstantRangeImpl : AAValueConstantRange {
8157   using StateType = IntegerRangeState;
8158   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8159       : AAValueConstantRange(IRP, A) {}
8160
8161   /// See AbstractAttribute::initialize(..).
8162   void initialize(Attributor &A) override {
8163     if (A.hasSimplificationCallback(getIRPosition())) {
8164       indicatePessimisticFixpoint();
8165       return;
8166     }
8167
8168     // Intersect a range given by SCEV.
8169     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8170
8171     // Intersect a range given by LVI.
8172     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8173   }
8174
8175   /// See AbstractAttribute::getAsStr().
8176   const std::string getAsStr() const override {
8177     std::string Str;
8178     llvm::raw_string_ostream OS(Str);
8179     OS << "range(" << getBitWidth() << ")<";
8180     getKnown().print(OS);
8181     OS << " / ";
8182     getAssumed().print(OS);
8183     OS << ">";
8184     return OS.str();
8185   }
8186
8187   /// Helper function to get a SCEV expr for the associated value at program
8188   /// point \p I.
8189   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8190     if (!getAnchorScope())
8191       return nullptr;
8192
8193     ScalarEvolution *SE =
8194         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8195             *getAnchorScope());
8196
8197     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8198         *getAnchorScope());
8199
8200     if (!SE || !LI)
8201       return nullptr;
8202
8203     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8204     if (!I)
8205       return S;
8206
8207     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8208   }
8209
8210   /// Helper function to get a range from SCEV for the associated value at
8211   /// program point \p I.
8212   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8213                                          const Instruction *I = nullptr) const {
8214     if (!getAnchorScope())
8215       return getWorstState(getBitWidth());
8216
8217     ScalarEvolution *SE =
8218         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8219             *getAnchorScope());
8220
8221     const SCEV *S = getSCEV(A, I);
8222     if (!SE || !S)
8223       return getWorstState(getBitWidth());
8224
8225     return SE->getUnsignedRange(S);
8226   }
8227
8228   /// Helper function to get a range from LVI for the associated value at
8229   /// program point \p I.
8230   ConstantRange
8231   getConstantRangeFromLVI(Attributor &A,
8232                           const Instruction *CtxI = nullptr) const {
8233     if (!getAnchorScope())
8234       return getWorstState(getBitWidth());
8235
8236     LazyValueInfo *LVI =
8237         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8238             *getAnchorScope());
8239
8240     if (!LVI || !CtxI)
8241       return getWorstState(getBitWidth());
8242     return LVI->getConstantRange(&getAssociatedValue(),
8243                                  const_cast<Instruction *>(CtxI));
8244   }
8245
8246   /// Return true if \p CtxI is valid for querying outside analyses.
8247   /// This basically makes sure we do not ask intra-procedural analyses
8248   /// about a context in the wrong function or a context that violates
8249   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8250   /// if the original context of this AA is OK or should be considered invalid.
8251   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8252                                                const Instruction *CtxI,
8253                                                bool AllowAACtxI) const {
8254     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8255       return false;
8256
8257     // Our context might be in a different function; neither intra-procedural
8258     // analysis (ScalarEvolution, LazyValueInfo) can handle that.
8259     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8260       return false;
8261
8262     // If the context is not dominated by the value there are paths to the
8263     // context that do not define the value. This cannot be handled by
8264     // LazyValueInfo so we need to bail.
8265     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8266       InformationCache &InfoCache = A.getInfoCache();
8267       const DominatorTree *DT =
8268           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8269               *I->getFunction());
8270       return DT && DT->dominates(I, CtxI);
8271     }
8272
8273     return true;
8274   }
8275
8276   /// See AAValueConstantRange::getKnownConstantRange(..).
8277   ConstantRange
8278   getKnownConstantRange(Attributor &A,
8279                         const Instruction *CtxI = nullptr) const override {
8280     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8281                                                  /* AllowAACtxI */ false))
8282       return getKnown();
8283
8284     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8285     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8286     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8287   }
8288
8289   /// See AAValueConstantRange::getAssumedConstantRange(..).
8290   ConstantRange
8291   getAssumedConstantRange(Attributor &A,
8292                           const Instruction *CtxI = nullptr) const override {
8293     // TODO: Make SCEV use Attributor assumption.
8294     //       We may be able to bound a variable range via assumptions in
8295     //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
8296     //       to evolve to x^2 + x, then we can say that y is in [2, 12].
8297     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8298                                                  /* AllowAACtxI */ false))
8299       return getAssumed();
8300
8301     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8302     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8303     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8304   }
8305
8306   /// Helper function to create MDNode for range metadata.
8307   static MDNode *
8308   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8309                             const ConstantRange &AssumedConstantRange) {
8310     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8311                                   Ty, AssumedConstantRange.getLower())),
8312                               ConstantAsMetadata::get(ConstantInt::get(
8313                                   Ty, AssumedConstantRange.getUpper()))};
8314     return MDNode::get(Ctx, LowAndHigh);
8315   }
8316
8317   /// Return true if \p Assumed is a better (smaller) range than \p KnownRanges.
8318   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8319
8320     if (Assumed.isFullSet())
8321       return false;
8322
8323     if (!KnownRanges)
8324       return true;
8325
8326     // If multiple ranges are annotated in IR, we give up on annotating the
8327     // assumed range for now.
8328
8329     // TODO: If there exists a known range which contains the assumed range,
8330     //       we can say the assumed range is better.
8331     if (KnownRanges->getNumOperands() > 2)
8332       return false;
8333
8334     ConstantInt *Lower =
8335         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8336     ConstantInt *Upper =
8337         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8338
8339     ConstantRange Known(Lower->getValue(), Upper->getValue());
8340     return Known.contains(Assumed) && Known != Assumed;
8341   }
8342
8343   /// Helper function to set range metadata.
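  /// For illustration (a sketch, not verbatim compiler output), annotating the
  /// assumed range [0, 42) on an integer load yields IR along these lines:
  ///   %x = load i32, i32* %p, !range !0
  ///   !0 = !{i32 0, i32 42}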
8344   static bool
8345   setRangeMetadataIfBetterRange(Instruction *I,
8346                                 const ConstantRange &AssumedConstantRange) {
8347     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8348     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8349       if (!AssumedConstantRange.isEmptySet()) {
8350         I->setMetadata(LLVMContext::MD_range,
8351                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8352                                                  AssumedConstantRange));
8353         return true;
8354       }
8355     }
8356     return false;
8357   }
8358
8359   /// See AbstractAttribute::manifest()
8360   ChangeStatus manifest(Attributor &A) override {
8361     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8362     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8363     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8364
8365     auto &V = getAssociatedValue();
8366     if (!AssumedConstantRange.isEmptySet() &&
8367         !AssumedConstantRange.isSingleElement()) {
8368       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8369         assert(I == getCtxI() && "Should not annotate an instruction which is "
8370                                  "not the context instruction");
8371         if (isa<CallInst>(I) || isa<LoadInst>(I))
8372           if (setRangeMetadataIfBetterRange(I, AssumedConstantRange))
8373             Changed = ChangeStatus::CHANGED;
8374       }
8375     }
8376
8377     return Changed;
8378   }
8379 };
8380
8381 struct AAValueConstantRangeArgument final
8382     : AAArgumentFromCallSiteArguments<
8383           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8384           true /* BridgeCallBaseContext */> {
8385   using Base = AAArgumentFromCallSiteArguments<
8386       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8387       true /* BridgeCallBaseContext */>;
8388   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8389       : Base(IRP, A) {}
8390
8391   /// See AbstractAttribute::initialize(..).
8392   void initialize(Attributor &A) override {
8393     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8394       indicatePessimisticFixpoint();
8395     } else {
8396       Base::initialize(A);
8397     }
8398   }
8399
8400   /// See AbstractAttribute::trackStatistics()
8401   void trackStatistics() const override {
8402     STATS_DECLTRACK_ARG_ATTR(value_range)
8403   }
8404 };
8405
8406 struct AAValueConstantRangeReturned
8407     : AAReturnedFromReturnedValues<AAValueConstantRange,
8408                                    AAValueConstantRangeImpl,
8409                                    AAValueConstantRangeImpl::StateType,
8410                                    /* PropagateCallBaseContext */ true> {
8411   using Base =
8412       AAReturnedFromReturnedValues<AAValueConstantRange,
8413                                    AAValueConstantRangeImpl,
8414                                    AAValueConstantRangeImpl::StateType,
8415                                    /* PropagateCallBaseContext */ true>;
8416   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8417       : Base(IRP, A) {}
8418
8419   /// See AbstractAttribute::initialize(...).
8420   void initialize(Attributor &A) override {}
8421
8422   /// See AbstractAttribute::trackStatistics()
8423   void trackStatistics() const override {
8424     STATS_DECLTRACK_FNRET_ATTR(value_range)
8425   }
8426 };
8427
8428 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8429   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8430       : AAValueConstantRangeImpl(IRP, A) {}
8431
8432   /// See AbstractAttribute::initialize(...).
8433   void initialize(Attributor &A) override {
8434     AAValueConstantRangeImpl::initialize(A);
8435     if (isAtFixpoint())
8436       return;
8437
8438     Value &V = getAssociatedValue();
8439
8440     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8441       unionAssumed(ConstantRange(C->getValue()));
8442       indicateOptimisticFixpoint();
8443       return;
8444     }
8445
8446     if (isa<UndefValue>(&V)) {
8447       // Collapse the undef state to 0.
8448       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8449       indicateOptimisticFixpoint();
8450       return;
8451     }
8452
8453     if (isa<CallBase>(&V))
8454       return;
8455
8456     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8457       return;
8458
8459     // If it is a load instruction with range metadata, use it.
8460     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8461       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8462         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8463         return;
8464       }
8465
8466     // We can work with PHI and select instructions as we traverse their
8467     // operands during update.
8468     if (isa<SelectInst>(V) || isa<PHINode>(V))
8469       return;
8470
8471     // Otherwise we give up.
8472     indicatePessimisticFixpoint();
8473
8474     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8475                       << getAssociatedValue() << "\n");
8476   }
8477
8478   bool calculateBinaryOperator(
8479       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8480       const Instruction *CtxI,
8481       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8482     Value *LHS = BinOp->getOperand(0);
8483     Value *RHS = BinOp->getOperand(1);
8484
8485     // Simplify the operands first.
8486     bool UsedAssumedInformation = false;
8487     const auto &SimplifiedLHS =
8488         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8489                                *this, UsedAssumedInformation);
8490     if (!SimplifiedLHS.hasValue())
8491       return true;
8492     if (!SimplifiedLHS.getValue())
8493       return false;
8494     LHS = *SimplifiedLHS;
8495
8496     const auto &SimplifiedRHS =
8497         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8498                                *this, UsedAssumedInformation);
8499     if (!SimplifiedRHS.hasValue())
8500       return true;
8501     if (!SimplifiedRHS.getValue())
8502       return false;
8503     RHS = *SimplifiedRHS;
8504
8505     // TODO: Allow non integers as well.
8506     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8507       return false;
8508
8509     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8510         *this, IRPosition::value(*LHS, getCallBaseContext()),
8511         DepClassTy::REQUIRED);
8512     QueriedAAs.push_back(&LHSAA);
8513     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8514
8515     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8516         *this, IRPosition::value(*RHS, getCallBaseContext()),
8517         DepClassTy::REQUIRED);
8518     QueriedAAs.push_back(&RHSAA);
8519     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8520
8521     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8522
8523     T.unionAssumed(AssumedRange);
8524
8525     // TODO: Track a known state too.
8526
8527     return T.isValidState();
8528   }
8529
8530   bool calculateCastInst(
8531       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8532       const Instruction *CtxI,
8533       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8534     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8535     // TODO: Allow non integers as well.
8536     Value *OpV = CastI->getOperand(0);
8537
8538     // Simplify the operand first.
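    // Note the tri-state convention used with getAssumedSimplified() below: an
    // unset Optional means "no answer yet, ask again later" (return true and
    // keep the current state), a null payload means "cannot be simplified,
    // give up" (return false), and otherwise we continue with the simplified
    // value.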
8539     bool UsedAssumedInformation = false;
8540     const auto &SimplifiedOpV =
8541         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8542                                *this, UsedAssumedInformation);
8543     if (!SimplifiedOpV.hasValue())
8544       return true;
8545     if (!SimplifiedOpV.getValue())
8546       return false;
8547     OpV = *SimplifiedOpV;
8548
8549     if (!OpV->getType()->isIntegerTy())
8550       return false;
8551
8552     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8553         *this, IRPosition::value(*OpV, getCallBaseContext()),
8554         DepClassTy::REQUIRED);
8555     QueriedAAs.push_back(&OpAA);
8556     T.unionAssumed(
8557         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8558     return T.isValidState();
8559   }
8560
8561   bool
8562   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8563                    const Instruction *CtxI,
8564                    SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8565     Value *LHS = CmpI->getOperand(0);
8566     Value *RHS = CmpI->getOperand(1);
8567
8568     // Simplify the operands first.
8569     bool UsedAssumedInformation = false;
8570     const auto &SimplifiedLHS =
8571         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8572                                *this, UsedAssumedInformation);
8573     if (!SimplifiedLHS.hasValue())
8574       return true;
8575     if (!SimplifiedLHS.getValue())
8576       return false;
8577     LHS = *SimplifiedLHS;
8578
8579     const auto &SimplifiedRHS =
8580         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8581                                *this, UsedAssumedInformation);
8582     if (!SimplifiedRHS.hasValue())
8583       return true;
8584     if (!SimplifiedRHS.getValue())
8585       return false;
8586     RHS = *SimplifiedRHS;
8587
8588     // TODO: Allow non integers as well.
8589     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8590       return false;
8591
8592     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8593         *this, IRPosition::value(*LHS, getCallBaseContext()),
8594         DepClassTy::REQUIRED);
8595     QueriedAAs.push_back(&LHSAA);
8596     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8597         *this, IRPosition::value(*RHS, getCallBaseContext()),
8598         DepClassTy::REQUIRED);
8599     QueriedAAs.push_back(&RHSAA);
8600     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8601     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8602
8603     // If one of them is an empty set, we can't decide.
8604     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8605       return true;
8606
8607     bool MustTrue = false, MustFalse = false;
8608
8609     auto AllowedRegion =
8610         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8611
8612     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8613       MustFalse = true;
8614
8615     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8616       MustTrue = true;
8617
8618     assert((!MustTrue || !MustFalse) &&
8619            "Either MustTrue or MustFalse should be false!");
8620
8621     if (MustTrue)
8622       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8623     else if (MustFalse)
8624       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8625     else
8626       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8627
8628     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8629                       << " " << RHSAA << "\n");
8630
8631     // TODO: Track a known state too.
8632     return T.isValidState();
8633   }
8634
8635   /// See AbstractAttribute::updateImpl(...).
8636   ChangeStatus updateImpl(Attributor &A) override {
8637     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8638                             IntegerRangeState &T, bool Stripped) -> bool {
8639       Instruction *I = dyn_cast<Instruction>(&V);
8640       if (!I || isa<CallBase>(I)) {
8641
8642         // Simplify the operand first.
8643         bool UsedAssumedInformation = false;
8644         const auto &SimplifiedOpV =
8645             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8646                                    *this, UsedAssumedInformation);
8647         if (!SimplifiedOpV.hasValue())
8648           return true;
8649         if (!SimplifiedOpV.getValue())
8650           return false;
8651         Value *VPtr = *SimplifiedOpV;
8652
8653         // If the value is not an instruction, we query the AA from the Attributor.
8654         const auto &AA = A.getAAFor<AAValueConstantRange>(
8655             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8656             DepClassTy::REQUIRED);
8657
8658         // The clamp operator is not used so that the program point CtxI can be utilized.
8659         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8660
8661         return T.isValidState();
8662       }
8663
8664       SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
8665       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8666         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
8667           return false;
8668       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8669         if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
8670           return false;
8671       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8672         if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
8673           return false;
8674       } else {
8675         // Give up with other instructions.
8676         // TODO: Add other instructions
8677
8678         T.indicatePessimisticFixpoint();
8679         return false;
8680       }
8681
8682       // Catch circular reasoning in a pessimistic way for now.
8683       // TODO: Check how the range evolves and if we stripped anything, see also
8684       //       AADereferenceable or AAAlign for similar situations.
8685       for (const AAValueConstantRange *QueriedAA : QueriedAAs) {
8686         if (QueriedAA != this)
8687           continue;
8688         // If we are in a steady state we do not need to worry.
8689         if (T.getAssumed() == getState().getAssumed())
8690           continue;
8691         T.indicatePessimisticFixpoint();
8692       }
8693
8694       return T.isValidState();
8695     };
8696
8697     IntegerRangeState T(getBitWidth());
8698
8699     bool UsedAssumedInformation = false;
8700     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8701                                                   VisitValueCB, getCtxI(),
8702                                                   UsedAssumedInformation,
8703                                                   /* UseValueSimplify */ false))
8704       return indicatePessimisticFixpoint();
8705
8706     // Ensure that long def-use chains can't cause circular reasoning either by
8707     // introducing a cutoff below.
8708     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8709       return ChangeStatus::UNCHANGED;
8710     if (++NumChanges > MaxNumChanges) {
8711       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8712                         << " changes but only " << MaxNumChanges
8713                         << " are allowed to avoid cyclic reasoning.\n");
8714       return indicatePessimisticFixpoint();
8715     }
8716     return ChangeStatus::CHANGED;
8717   }
8718
8719   /// See AbstractAttribute::trackStatistics()
8720   void trackStatistics() const override {
8721     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8722   }
8723
8724   /// Tracker to bail after too many widening steps of the constant range.
8725   int NumChanges = 0;
8726
8727   /// Upper bound for the number of allowed changes (=widening steps) for the
8728   /// constant range before we give up.
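  /// E.g., a value that feeds back into itself through a loop may keep
  /// widening its assumed range ([0,1) -> [0,2) -> [0,4) -> ...); after
  /// MaxNumChanges such steps we pessimistically fall back to the full range.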
8729   static constexpr int MaxNumChanges = 5;
8730 };
8731
8732 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8733   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8734       : AAValueConstantRangeImpl(IRP, A) {}
8735
8736   /// See AbstractAttribute::updateImpl(...).
8737   ChangeStatus updateImpl(Attributor &A) override {
8738     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8739                      "not be called");
8740   }
8741
8742   /// See AbstractAttribute::trackStatistics()
8743   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8744 };
8745
8746 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8747   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8748       : AAValueConstantRangeFunction(IRP, A) {}
8749
8750   /// See AbstractAttribute::trackStatistics()
8751   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8752 };
8753
8754 struct AAValueConstantRangeCallSiteReturned
8755     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8756                                      AAValueConstantRangeImpl,
8757                                      AAValueConstantRangeImpl::StateType,
8758                                      /* IntroduceCallBaseContext */ true> {
8759   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8760       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8761                                        AAValueConstantRangeImpl,
8762                                        AAValueConstantRangeImpl::StateType,
8763                                        /* IntroduceCallBaseContext */ true>(IRP,
8764                                                                             A) {
8765   }
8766
8767   /// See AbstractAttribute::initialize(...).
8768   void initialize(Attributor &A) override {
8769     // If it is a call instruction with range metadata, use the metadata.
8770     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8771       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8772         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8773
8774     AAValueConstantRangeImpl::initialize(A);
8775   }
8776
8777   /// See AbstractAttribute::trackStatistics()
8778   void trackStatistics() const override {
8779     STATS_DECLTRACK_CSRET_ATTR(value_range)
8780   }
8781 };
8782 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8783   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8784       : AAValueConstantRangeFloating(IRP, A) {}
8785
8786   /// See AbstractAttribute::manifest()
8787   ChangeStatus manifest(Attributor &A) override {
8788     return ChangeStatus::UNCHANGED;
8789   }
8790
8791   /// See AbstractAttribute::trackStatistics()
8792   void trackStatistics() const override {
8793     STATS_DECLTRACK_CSARG_ATTR(value_range)
8794   }
8795 };
8796 } // namespace
8797
8798 /// ------------------ Potential Values Attribute -------------------------
8799
8800 namespace {
8801 struct AAPotentialValuesImpl : AAPotentialValues {
8802   using StateType = PotentialConstantIntValuesState;
8803
8804   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8805       : AAPotentialValues(IRP, A) {}
8806
8807   /// See AbstractAttribute::initialize(..).
8808   void initialize(Attributor &A) override {
8809     if (A.hasSimplificationCallback(getIRPosition()))
8810       indicatePessimisticFixpoint();
8811     else
8812       AAPotentialValues::initialize(A);
8813   }
8814
8815   /// See AbstractAttribute::getAsStr().
8816   const std::string getAsStr() const override {
8817     std::string Str;
8818     llvm::raw_string_ostream OS(Str);
8819     OS << getState();
8820     return OS.str();
8821   }
8822
8823   /// See AbstractAttribute::updateImpl(...).
8824   ChangeStatus updateImpl(Attributor &A) override {
8825     return indicatePessimisticFixpoint();
8826   }
8827 };
8828
8829 struct AAPotentialValuesArgument final
8830     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8831                                       PotentialConstantIntValuesState> {
8832   using Base =
8833       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8834                                       PotentialConstantIntValuesState>;
8835   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8836       : Base(IRP, A) {}
8837
8838   /// See AbstractAttribute::initialize(..).
8839   void initialize(Attributor &A) override {
8840     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8841       indicatePessimisticFixpoint();
8842     } else {
8843       Base::initialize(A);
8844     }
8845   }
8846
8847   /// See AbstractAttribute::trackStatistics()
8848   void trackStatistics() const override {
8849     STATS_DECLTRACK_ARG_ATTR(potential_values)
8850   }
8851 };
8852
8853 struct AAPotentialValuesReturned
8854     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8855   using Base =
8856       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8857   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8858       : Base(IRP, A) {}
8859
8860   /// See AbstractAttribute::trackStatistics()
8861   void trackStatistics() const override {
8862     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8863   }
8864 };
8865
8866 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8867   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8868       : AAPotentialValuesImpl(IRP, A) {}
8869
8870   /// See AbstractAttribute::initialize(..).
8871   void initialize(Attributor &A) override {
8872     AAPotentialValuesImpl::initialize(A);
8873     if (isAtFixpoint())
8874       return;
8875
8876     Value &V = getAssociatedValue();
8877
8878     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8879       unionAssumed(C->getValue());
8880       indicateOptimisticFixpoint();
8881       return;
8882     }
8883
8884     if (isa<UndefValue>(&V)) {
8885       unionAssumedWithUndef();
8886       indicateOptimisticFixpoint();
8887       return;
8888     }
8889
8890     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8891       return;
8892
8893     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8894       return;
8895
8896     indicatePessimisticFixpoint();
8897
8898     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8899                       << getAssociatedValue() << "\n");
8900   }
8901
8902   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8903                                 const APInt &RHS) {
8904     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8905   }
8906
8907   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8908                                  uint32_t ResultBitWidth) {
8909     Instruction::CastOps CastOp = CI->getOpcode();
8910     switch (CastOp) {
8911     default:
8912       llvm_unreachable("unsupported or non-integer cast");
8913     case Instruction::Trunc:
8914       return Src.trunc(ResultBitWidth);
8915     case Instruction::SExt:
8916       return Src.sext(ResultBitWidth);
8917     case Instruction::ZExt:
8918       return Src.zext(ResultBitWidth);
8919     case Instruction::BitCast:
8920       return Src;
8921     }
8922   }
8923
8924   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8925                                        const APInt &LHS, const APInt &RHS,
8926                                        bool &SkipOperation, bool &Unsupported) {
8927     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8928     // Unsupported is set to true when the binary operator is not supported.
8929     // SkipOperation is set to true when UB occurs with the given operand pair
8930     // (LHS, RHS).
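    // E.g., for `udiv` the pair (LHS, 0) would be immediate UB, so
    // SkipOperation is set and that pair simply contributes no value.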
8931     // TODO: we should look at nsw and nuw keywords to handle operations
8932     //       that create poison or undef values.
8933     switch (BinOpcode) {
8934     default:
8935       Unsupported = true;
8936       return LHS;
8937     case Instruction::Add:
8938       return LHS + RHS;
8939     case Instruction::Sub:
8940       return LHS - RHS;
8941     case Instruction::Mul:
8942       return LHS * RHS;
8943     case Instruction::UDiv:
8944       if (RHS.isZero()) {
8945         SkipOperation = true;
8946         return LHS;
8947       }
8948       return LHS.udiv(RHS);
8949     case Instruction::SDiv:
8950       if (RHS.isZero()) {
8951         SkipOperation = true;
8952         return LHS;
8953       }
8954       return LHS.sdiv(RHS);
8955     case Instruction::URem:
8956       if (RHS.isZero()) {
8957         SkipOperation = true;
8958         return LHS;
8959       }
8960       return LHS.urem(RHS);
8961     case Instruction::SRem:
8962       if (RHS.isZero()) {
8963         SkipOperation = true;
8964         return LHS;
8965       }
8966       return LHS.srem(RHS);
8967     case Instruction::Shl:
8968       return LHS.shl(RHS);
8969     case Instruction::LShr:
8970       return LHS.lshr(RHS);
8971     case Instruction::AShr:
8972       return LHS.ashr(RHS);
8973     case Instruction::And:
8974       return LHS & RHS;
8975     case Instruction::Or:
8976       return LHS | RHS;
8977     case Instruction::Xor:
8978       return LHS ^ RHS;
8979     }
8980   }
8981
8982   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8983                                            const APInt &LHS, const APInt &RHS) {
8984     bool SkipOperation = false;
8985     bool Unsupported = false;
8986     APInt Result =
8987         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8988     if (Unsupported)
8989       return false;
8990     // If SkipOperation is true, we can ignore this operand pair (L, R).
8991     if (!SkipOperation)
8992       unionAssumed(Result);
8993     return isValidState();
8994   }
8995
8996   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8997     auto AssumedBefore = getAssumed();
8998     Value *LHS = ICI->getOperand(0);
8999     Value *RHS = ICI->getOperand(1);
9000
9001     // Simplify the operands first.
9002     bool UsedAssumedInformation = false;
9003     const auto &SimplifiedLHS =
9004         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
9005                                *this, UsedAssumedInformation);
9006     if (!SimplifiedLHS.hasValue())
9007       return ChangeStatus::UNCHANGED;
9008     if (!SimplifiedLHS.getValue())
9009       return indicatePessimisticFixpoint();
9010     LHS = *SimplifiedLHS;
9011
9012     const auto &SimplifiedRHS =
9013         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
9014                                *this, UsedAssumedInformation);
9015     if (!SimplifiedRHS.hasValue())
9016       return ChangeStatus::UNCHANGED;
9017     if (!SimplifiedRHS.getValue())
9018       return indicatePessimisticFixpoint();
9019     RHS = *SimplifiedRHS;
9020
9021     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9022       return indicatePessimisticFixpoint();
9023
9024     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9025                                                 DepClassTy::REQUIRED);
9026     if (!LHSAA.isValidState())
9027       return indicatePessimisticFixpoint();
9028
9029     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9030                                                 DepClassTy::REQUIRED);
9031     if (!RHSAA.isValidState())
9032       return indicatePessimisticFixpoint();
9033
9034     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9035     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9036
9037     // TODO: make use of undef flag to limit potential values aggressively.
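    // A sketch of the undef handling below: a side whose set contains undef is
    // evaluated as if it were 0, e.g., `icmp ult i32 undef, %r` is folded
    // per-value as `0 ult R` for each R in the RHS set.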
9038 bool MaybeTrue = false, MaybeFalse = false; 9039 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 9040 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9041 // The result of any comparison between undefs can be soundly replaced 9042 // with undef. 9043 unionAssumedWithUndef(); 9044 } else if (LHSAA.undefIsContained()) { 9045 for (const APInt &R : RHSAAPVS) { 9046 bool CmpResult = calculateICmpInst(ICI, Zero, R); 9047 MaybeTrue |= CmpResult; 9048 MaybeFalse |= !CmpResult; 9049 if (MaybeTrue & MaybeFalse) 9050 return indicatePessimisticFixpoint(); 9051 } 9052 } else if (RHSAA.undefIsContained()) { 9053 for (const APInt &L : LHSAAPVS) { 9054 bool CmpResult = calculateICmpInst(ICI, L, Zero); 9055 MaybeTrue |= CmpResult; 9056 MaybeFalse |= !CmpResult; 9057 if (MaybeTrue & MaybeFalse) 9058 return indicatePessimisticFixpoint(); 9059 } 9060 } else { 9061 for (const APInt &L : LHSAAPVS) { 9062 for (const APInt &R : RHSAAPVS) { 9063 bool CmpResult = calculateICmpInst(ICI, L, R); 9064 MaybeTrue |= CmpResult; 9065 MaybeFalse |= !CmpResult; 9066 if (MaybeTrue & MaybeFalse) 9067 return indicatePessimisticFixpoint(); 9068 } 9069 } 9070 } 9071 if (MaybeTrue) 9072 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 9073 if (MaybeFalse) 9074 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 9075 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9076 : ChangeStatus::CHANGED; 9077 } 9078 9079 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 9080 auto AssumedBefore = getAssumed(); 9081 Value *LHS = SI->getTrueValue(); 9082 Value *RHS = SI->getFalseValue(); 9083 9084 // Simplify the operands first. 9085 bool UsedAssumedInformation = false; 9086 const auto &SimplifiedLHS = 9087 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9088 *this, UsedAssumedInformation); 9089 if (!SimplifiedLHS.hasValue()) 9090 return ChangeStatus::UNCHANGED; 9091 if (!SimplifiedLHS.getValue()) 9092 return indicatePessimisticFixpoint(); 9093 LHS = *SimplifiedLHS; 9094 9095 const auto &SimplifiedRHS = 9096 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9097 *this, UsedAssumedInformation); 9098 if (!SimplifiedRHS.hasValue()) 9099 return ChangeStatus::UNCHANGED; 9100 if (!SimplifiedRHS.getValue()) 9101 return indicatePessimisticFixpoint(); 9102 RHS = *SimplifiedRHS; 9103 9104 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9105 return indicatePessimisticFixpoint(); 9106 9107 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 9108 UsedAssumedInformation); 9109 9110 // Check if we only need one operand. 9111 bool OnlyLeft = false, OnlyRight = false; 9112 if (C.hasValue() && *C && (*C)->isOneValue()) 9113 OnlyLeft = true; 9114 else if (C.hasValue() && *C && (*C)->isZeroValue()) 9115 OnlyRight = true; 9116 9117 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 9118 if (!OnlyRight) { 9119 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9120 DepClassTy::REQUIRED); 9121 if (!LHSAA->isValidState()) 9122 return indicatePessimisticFixpoint(); 9123 } 9124 if (!OnlyLeft) { 9125 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9126 DepClassTy::REQUIRED); 9127 if (!RHSAA->isValidState()) 9128 return indicatePessimisticFixpoint(); 9129 } 9130 9131 if (!LHSAA || !RHSAA) { 9132 // select (true/false), lhs, rhs 9133 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 9134 9135 if (OpAA->undefIsContained()) 9136 unionAssumedWithUndef(); 9137 else 9138 unionAssumed(*OpAA); 9139 9140 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 9141 // select i1 *, undef , undef => undef 9142 unionAssumedWithUndef(); 9143 } else { 9144 unionAssumed(*LHSAA); 9145 unionAssumed(*RHSAA); 9146 } 9147 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9148 : ChangeStatus::CHANGED; 9149 } 9150 9151 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 9152 auto AssumedBefore = getAssumed(); 9153 if (!CI->isIntegerCast()) 9154 return indicatePessimisticFixpoint(); 9155 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 9156 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 9157 Value *Src = CI->getOperand(0); 9158 9159 // Simplify the operand first. 9160 bool UsedAssumedInformation = false; 9161 const auto &SimplifiedSrc = 9162 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 9163 *this, UsedAssumedInformation); 9164 if (!SimplifiedSrc.hasValue()) 9165 return ChangeStatus::UNCHANGED; 9166 if (!SimplifiedSrc.getValue()) 9167 return indicatePessimisticFixpoint(); 9168 Src = *SimplifiedSrc; 9169 9170 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 9171 DepClassTy::REQUIRED); 9172 if (!SrcAA.isValidState()) 9173 return indicatePessimisticFixpoint(); 9174 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 9175 if (SrcAA.undefIsContained()) 9176 unionAssumedWithUndef(); 9177 else { 9178 for (const APInt &S : SrcAAPVS) { 9179 APInt T = calculateCastInst(CI, S, ResultBitWidth); 9180 unionAssumed(T); 9181 } 9182 } 9183 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9184 : ChangeStatus::CHANGED; 9185 } 9186 9187 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 9188 auto AssumedBefore = getAssumed(); 9189 Value *LHS = BinOp->getOperand(0); 9190 Value *RHS = BinOp->getOperand(1); 9191 9192 // Simplify the operands first. 9193 bool UsedAssumedInformation = false; 9194 const auto &SimplifiedLHS = 9195 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9196 *this, UsedAssumedInformation); 9197 if (!SimplifiedLHS.hasValue()) 9198 return ChangeStatus::UNCHANGED; 9199 if (!SimplifiedLHS.getValue()) 9200 return indicatePessimisticFixpoint(); 9201 LHS = *SimplifiedLHS; 9202 9203 const auto &SimplifiedRHS = 9204 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9205 *this, UsedAssumedInformation); 9206 if (!SimplifiedRHS.hasValue()) 9207 return ChangeStatus::UNCHANGED; 9208 if (!SimplifiedRHS.getValue()) 9209 return indicatePessimisticFixpoint(); 9210 RHS = *SimplifiedRHS; 9211 9212 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9213 return indicatePessimisticFixpoint(); 9214 9215 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9216 DepClassTy::REQUIRED); 9217 if (!LHSAA.isValidState()) 9218 return indicatePessimisticFixpoint(); 9219 9220 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9221 DepClassTy::REQUIRED); 9222 if (!RHSAA.isValidState()) 9223 return indicatePessimisticFixpoint(); 9224 9225 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9226 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9227 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9228 9229 // TODO: make use of undef flag to limit potential values aggressively. 
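    // As with icmp above, an operand set containing undef is evaluated as if
    // the operand were 0, e.g., `add i32 undef, %r` contributes 0 + R for each
    // R in the RHS set.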
9230 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9231 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9232 return indicatePessimisticFixpoint(); 9233 } else if (LHSAA.undefIsContained()) { 9234 for (const APInt &R : RHSAAPVS) { 9235 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9236 return indicatePessimisticFixpoint(); 9237 } 9238 } else if (RHSAA.undefIsContained()) { 9239 for (const APInt &L : LHSAAPVS) { 9240 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9241 return indicatePessimisticFixpoint(); 9242 } 9243 } else { 9244 for (const APInt &L : LHSAAPVS) { 9245 for (const APInt &R : RHSAAPVS) { 9246 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9247 return indicatePessimisticFixpoint(); 9248 } 9249 } 9250 } 9251 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9252 : ChangeStatus::CHANGED; 9253 } 9254 9255 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9256 auto AssumedBefore = getAssumed(); 9257 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9258 Value *IncomingValue = PHI->getIncomingValue(u); 9259 9260 // Simplify the operand first. 9261 bool UsedAssumedInformation = false; 9262 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9263 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9264 UsedAssumedInformation); 9265 if (!SimplifiedIncomingValue.hasValue()) 9266 continue; 9267 if (!SimplifiedIncomingValue.getValue()) 9268 return indicatePessimisticFixpoint(); 9269 IncomingValue = *SimplifiedIncomingValue; 9270 9271 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9272 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9273 if (!PotentialValuesAA.isValidState()) 9274 return indicatePessimisticFixpoint(); 9275 if (PotentialValuesAA.undefIsContained()) 9276 unionAssumedWithUndef(); 9277 else 9278 unionAssumed(PotentialValuesAA.getAssumed()); 9279 } 9280 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9281 : ChangeStatus::CHANGED; 9282 } 9283 9284 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9285 if (!L.getType()->isIntegerTy()) 9286 return indicatePessimisticFixpoint(); 9287 9288 auto Union = [&](Value &V) { 9289 if (isa<UndefValue>(V)) { 9290 unionAssumedWithUndef(); 9291 return true; 9292 } 9293 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9294 unionAssumed(CI->getValue()); 9295 return true; 9296 } 9297 return false; 9298 }; 9299 auto AssumedBefore = getAssumed(); 9300 9301 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9302 return indicatePessimisticFixpoint(); 9303 9304 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9305 : ChangeStatus::CHANGED; 9306 } 9307 9308 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    Instruction *I = dyn_cast<Instruction>(&V);
    if (!I)
      return indicatePessimisticFixpoint();

    if (auto *ICI = dyn_cast<ICmpInst>(I))
      return updateWithICmpInst(A, ICI);

    if (auto *SI = dyn_cast<SelectInst>(I))
      return updateWithSelectInst(A, SI);

    if (auto *CI = dyn_cast<CastInst>(I))
      return updateWithCastInst(A, CI);

    if (auto *BinOp = dyn_cast<BinaryOperator>(I))
      return updateWithBinaryOperator(A, BinOp);

    if (auto *PHI = dyn_cast<PHINode>(I))
      return updateWithPHINode(A, PHI);

    if (auto *L = dyn_cast<LoadInst>(I))
      return updateWithLoad(A, *L);

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(potential_values)
  }
};

struct AAPotentialValuesFunction : AAPotentialValuesImpl {
  AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
  AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSiteReturned
    : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
  AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AAPotentialValues,
                                       AAPotentialValuesImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
  AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPotentialValuesImpl::initialize(A);
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(C->getValue());
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      unionAssumedWithUndef();
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    auto AssumedBefore = getAssumed();
    auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
    const auto &S = AA.getAssumed();
    unionAssumed(S);
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track use for instructions which must produce undef or poison bits when
    // at least one operand contains such bits.
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef
    // values.
    bool UsedAssumedInformation = false;
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
                        UsedAssumedInformation))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value is
    // considered to be dead. We don't manifest noundef in such positions for
    // the same reason as above.
    if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
             .hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};

struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  virtual const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }

  virtual bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      bool UsedAssumedInformation = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       UsedAssumedInformation, false)) {
        // If we haven't gone through all values, assume that there are
        // unknown callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = cast<CallBase>(getCtxI());

    if (CB->isInlineAsm()) {
      setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};

struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
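    // Both direct and indirect call sites are included; e.g., the two calls in
    //   call void @known()
    //   call void %fn.ptr()
    // each contribute the edges of their call-site AACallEdges to this
    // function-level AA.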
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(const Function &Fn) {
      Reachable.insert(&Fn);
      Unreachable.erase(&Fn);
    }

    /// If there is no information about the function, None is returned.
    Optional<bool> isCachedReachable(const Function &Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(&Fn))
        return true;

      if (Unreachable.count(&Fn))
        return false;

      return llvm::None;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<const Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<const Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we
    /// assume that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

  struct QueryResolver : public QuerySet {
    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (const Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(*Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList,
                     const Function &Fn) {
      Optional<bool> Cached = isCachedReachable(Fn);
      if (Cached.hasValue())
        return Cached.getValue();

      // The query was not cached, thus it is new. We need to request an
      // update explicitly to make sure the information is properly run to a
      // fixpoint.
      A.registerForUpdate(AA);

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(&Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          const Function &Fn) const {

      // Handle the most trivial case first.
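      // That is, if Fn already appears as a direct (optimistic) call edge in
      // any edge set, it is reachable without a transitive search.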
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(const_cast<Function *>(&Fn)))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // We don't need to record a dependency if the result is already
          // known to be reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (auto *Dep : Deps)
        A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);

      return false;
    }
  };

  /// Get call edges that can be reached by this instruction.
  bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
                             const Instruction &Inst,
                             SmallVector<const AACallEdges *> &Result) const {
    // Determine the call-like instructions that we can reach from the
    // instruction.
    auto CheckCallBase = [&](Instruction &CBInst) {
      if (!Reachability.isAssumedReachable(A, Inst, CBInst))
        return true;

      auto &CB = cast<CallBase>(CBInst);
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

      Result.push_back(&AAEdges);
      return true;
    };

    bool UsedAssumedInformation = false;
    return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                             UsedAssumedInformation,
                                             /* CheckBBLivenessOnly */ true);
  }

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
                                                          {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB,
                const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
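    // Note that the per-call-site QueryResolver below is default-constructed
    // on first use and then refined by subsequent updateImpl invocations.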
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);

    return Result;
  }

  bool instructionCanReach(Attributor &A, const Instruction &Inst,
                           const Function &Fn,
                           bool UseBackwards) const override {
    if (!isValidState())
      return true;

    if (UseBackwards)
      return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);

    const auto &Reachability = A.getAAFor<AAReachability>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);

    SmallVector<const AACallEdges *> CallEdges;
    bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
    if (!AllKnown)
      InstQSet.CanReachUnknownCallee = true;

    return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    // Update the instruction queries.
    if (!InstQueries.empty()) {
      const AAReachability *Reachability = &A.getAAFor<AAReachability>(
          *this, IRPosition::function(*getAssociatedFunction()),
          DepClassTy::REQUIRED);

      // Check for local call bases first.
      for (auto &InstPair : InstQueries) {
        SmallVector<const AACallEdges *> CallEdges;
        bool AllKnown = getReachableCallEdges(A, *Reachability,
                                              *InstPair.first, CallEdges);
        // Update will return CHANGED if this affects any queries.
        if (!AllKnown)
          InstPair.second.CanReachUnknownCallee = true;
        Change |= InstPair.second.update(A, *this, CallEdges);
      }
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QueryResolver WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  DenseMap<const CallBase *, QueryResolver> CBQueries;

  /// This is for instruction queries that scan "forward".
  DenseMap<const Instruction *, QueryResolver> InstQueries;
};
} // namespace

/// ---------------------- Assumption Propagation ------------------------------
namespace {
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};

/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool UsedAssumedInformation = false;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either
    // an entry into the call graph or an empty node.
    // This node is known to only contain its own assumptions and can be
    // propagated to its successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};

/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};
} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
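//
// As an illustration, the invocation
//   CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// below expands to a definition of
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &, Attributor &)
// that switches over the position kind, allocating AANoUnwindFunction or
// AANoUnwindCallSite for the two supported positions, while every other
// position kind hits llvm_unreachable.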

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV