//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");
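// A note on the command line options above: they are meant for debugging and
// tuning. As a sketch (the flag names are the ones registered above; the pass
// pipeline spelling is the usual new-PM one):
//
//   opt -passes=attributor -attributor-max-potential-values=3 -S in.ll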
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//   void trackStatistics() const override {
//     STATS_DECLTRACK_ARG_ATTR(returned)
//   }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                           \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                      \
  {                                                                           \
    STATS_DECL(NAME, TYPE, MSG)                                               \
    STATS_TRACK(NAME, TYPE)                                                   \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                        \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                      \
  STATS_DECLTRACK(NAME, CSArguments,                                          \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                      \
  STATS_DECLTRACK(NAME, FunctionReturn,                                       \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                      \
  STATS_DECLTRACK(NAME, CSReturn,                                             \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                   \
  STATS_DECLTRACK(NAME, Floating,                                             \
                  ("Number of floating values known to be '" #NAME "'"))
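// For illustration, a single use such as
//
//   STATS_DECLTRACK_ARG_ATTR(returned)
//
// expands (modulo the wrapping braces) to
//
//   STATISTIC(NumIRArguments_returned, "Number of arguments marked 'returned'");
//   ++(NumIRArguments_returned);
//
// i.e., every instantiation declares its own counter and increments it in
// place.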
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not a
/// memory accessing instruction, return nullptr. If \p AllowVolatile is set to
/// false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}
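// For example, for `%v = load i32, i32* %p` the helper above returns %p, and
// for `store i32 0, i32* %q` it returns %q; for a volatile access it returns
// nullptr unless \p AllowVolatile is set.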
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
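// A sketch of the offset traversal performed above: for
// `%s = alloca { i32, i32, i32 }` and Offset = 8, getGEPIndicesForOffset
// yields the indices {0, 2}, so we emit
//
//   %s.0.2 = getelementptr { i32, i32, i32 }, { i32, i32, i32 }* %s, i64 0, i64 2
//
// and no byte-wise adjustment remains. An offset that does not land on a
// field boundary leaves a remainder that is applied through an i8* GEP.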
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
/// If \p Intraprocedural is set to true, only values valid in the scope of
/// \p CtxI will be visited and simplification into other scopes is prevented.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool &UsedAssumedInformation,
    bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr,
    bool Intraprocedural = false) {

  struct LivenessInfo {
    const AAIsDead *LivenessAA = nullptr;
    bool AnyDead = false;
  };
  SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
  auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
    LivenessInfo &LI = LivenessAAs[&F];
    if (!LI.LivenessAA)
      LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F),
                                            DepClassTy::NONE);
    return LI;
  };

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues) {
      LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
                        << Iteration << "!\n");
      return false;
    }

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site
                  // operand, stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if (!Intraprocedural || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      bool UsedAssumedInformation = false;
      // If we ask for the potentially loaded values from the initial pointer
      // we will simply end up here again. The load is as far as we can make
      // it.
      if (LI->getPointerOperand() != InitialV) {
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies, QueryingAA,
                                           UsedAssumedInformation,
                                           /* OnlyExact */ true)) {
          // Values have to be dynamically unique or we lose the fact that a
          // single llvm::Value might represent two runtime values (e.g.,
          // stack locations in different recursive calls).
          bool DynamicallyUnique =
              llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
                return AA::isDynamicallyUnique(A, QueryingAA, *PC);
              });
          if (DynamicallyUnique &&
              (!Intraprocedural || !CtxI ||
               llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
                 return AA::isValidInScope(*PC, CtxI->getFunction());
               }))) {
            for (auto *PotentialCopy : PotentialCopies)
              Worklist.push_back({PotentialCopy, CtxI});
            continue;
          }
        }
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
      LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
                        << *V << "!\n");
      return false;
    }
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  for (auto &It : LivenessAAs)
    if (It.second.AnyDead)
      A.recordDependence(*It.second.LivenessAA, QueryingAA,
                         DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
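// A minimal usage sketch of the traversal above, assuming a state that merely
// collects the leaf values (getAssumedUnderlyingObjects below is a real
// instantiation of this pattern):
//
//   SmallVector<Value *> Leaves;
//   auto CB = [](Value &V, const Instruction *, SmallVector<Value *> &S,
//                bool /* Stripped */) {
//     S.push_back(&V);
//     return true;
//   };
//   genericValueTraversal<SmallVector<Value *>>(
//       A, IRP, QueryingAA, Leaves, CB, IRP.getCtxI(),
//       UsedAssumedInformation);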
bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     bool Intraprocedural) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          UsedAssumedInformation, true, 32, StripCB, Intraprocedural))
    return false;
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
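// For example, instantiated for AANonNull, the helpers above make the
// returned position assumed nonnull iff every potentially returned value is
// assumed nonnull: the optional state T joins the per-value states via
// operator&= and is finally clamped into the attribute's own state.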
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and
  // we want to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}
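// For illustration: when the argument position of `i32 %x` of @f carries the
// call base context `%r = call i32 @f(i32 %y)`, the bridge above takes the
// state of the call site argument %y at that particular call instead of
// joining the states of all call sites of @f.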
/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}
/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into the overall known state. Let ParentS_i be
  // the state capturing the known information for the i-th branch instruction
  // in the context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a) {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   } else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
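// As a concrete instance of the branch merging scheme above: for
// AADereferenceable, if both successors of a conditional branch in the
// must-be-executed context dereference the pointer (both child states know
// "dereferenceable(4)"), the conjunction makes the parent state known
// "dereferenceable(4)" even though neither use dominates the context
// instruction on its own.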
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }
  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }
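  // For example, a store of an i32 at offset 8 is recorded in bin {8, 4}. A
  // query for the range {4, 8}, i.e., bytes [4, 12), overlaps that bin but
  // does not match it exactly, so the callback sees the store with
  // IsExact == false.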
  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or all
    // instructions are executed by the initial thread only, we can safely
    // ignore threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load; for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable
    // query to determine if we should look at reachability from the callee.
    // For certain pointers we know the lifetime and we do not have to step
    // into the callee to determine reachability, as the pointer would be dead
    // in the callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the alloca containing function is not recursive the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }
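    // For instance, on AMD/NVIDIA GPUs an address-space-3 global
    // (GPUAddressSpace::Shared above) cannot outlive the kernel using it, so
    // once reachability hits a function carrying the "kernel" attribute the
    // pointer is necessarily dead there and the search can stop.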
    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning, which does
      // not yet work if there are threading effects or the access is
      // complicated.
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if that
    // succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses *Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : *It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
                                AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};
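// To illustrate translateAndAddCalleeState: a callee access in bin {4, 4} on
// an argument whose call site operand points at a constant offset of 16
// becomes a call-site access in bin {20, 4}. For a byval argument only read
// accesses are propagated since the callee operates on a private copy.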
struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };
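  // A short example of the offset propagation performed by updateImpl below
  // (sizes in bytes, assuming i32 is 4 bytes wide):
  //
  //   %g = getelementptr inbounds i32, i32* %p, i64 2  ; offset(%g) = offset(%p) + 8
  //   %b = bitcast i32* %g to i8*                      ; offset(%b) = offset(%g)
  //   store i8 0, i8* %b                               ; access bin {offset(%b), 1}
  //
  // A GEP with non-constant indices makes the offset unknown; unhandled users
  // cause a pessimistic fixpoint.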
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
                                          GEP->getSourceElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the
      // value might change while we iterate through a loop. For now, we give
      // up if the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses.
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond()->size() << "\n";
        for (auto &Acc : *It.getSecond()) {
          dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "     --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined()) {
            if (Acc.getWrittenValue())
              dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
            else
              dbgs() << "       - c: <unknown>\n";
          }
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly; at least the first
    // (= destination) and second (= source) arguments are understood since
    // we know how they are accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
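    // In the meantime we delegate: the callee argument's AAPointerInfo is
    // queried below and its accesses, shifted by our (zero) offset, are
    // translated into this call site's state via translateAndAddCalleeState.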
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
} // namespace

/// -----------------------NoUnwind Function Attribute--------------------------

namespace {
struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
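    // Illustrative (hypothetical) IR: once @g is assumed nounwind, the call
    // below can be treated as nounwind as well by clamping against @g's
    // state:
    //   declare void @g() nounwind
    //   call void @g()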
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};
} // namespace

/// --------------------- Function Return Values -------------------------------

namespace {
/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments; if one is marked as returned, we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr.
  /// If it is not clear yet, return the Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return values");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values, but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    assert(AA::isValidInScope(V, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[&V].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  bool UsedAssumedInformation = false;
  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I, UsedAssumedInformation, /* UseValueSimplify */ true,
        /* MaxValues */ 16,
        /* StripCB */ nullptr, /* Intraprocedural */ true);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
} // namespace

/// ------------------------ NoSync Function Attribute -------------------------

bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg.
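    // For illustration (hypothetical IR): a fully relaxed cmpxchg such as
    //   %r = cmpxchg ptr %p, i32 %old, i32 %new monotonic monotonic
    // is not considered non-relaxed; any stronger success or failure
    // ordering makes the check below return true.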
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

namespace {
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    return AA::isNoSyncInst(A, I, *this);
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // Non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
} // namespace

/// ------------------------ No-Free Attributes ----------------------------

namespace {
struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for a call site argument.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
} // namespace

/// ------------------------ NonNull Argument Attribute ------------------------
namespace {
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart and avoid looking through things we do
  // not like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
    return 0;

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
  if (Base && Base == &AssociatedValue) {
    int64_t DerefBytes = Loc->Size.getValue() + Offset;
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  /// Corner case when an offset is 0.
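  // That is (illustrative): if the accessed pointer is the associated value
  // itself, with a constant offset of exactly 0, the access proves Loc->Size
  // dereferenceable bytes even though no minimal base was found above.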
  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
                                          /*AllowNonInbounds*/ true);
  if (Base && Base == &AssociatedValue && Offset == 0) {
    int64_t DerefBytes = Loc->Size.getValue();
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  return 0;
}

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
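        // The "^=" below merges the operand's nonnull state into T; only
        // information assumed in both states survives (a join on the
        // lattice).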
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
} // namespace

/// ------------------------ No-Recurse Attributes ----------------------------

namespace {
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
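    // A call site only counts below if the function containing it is already
    // *known* no-recurse; liveness of the call sites themselves may still
    // rest on assumed information, which is what UsedAssumedInformation
    // tracks.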
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          DepClassTy::NONE);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(CallSitePred, *this, true,
                               UsedAssumedInformation)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    const AAFunctionReachability &EdgeReachability =
        A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
                                           DepClassTy::REQUIRED);
    if (EdgeReachability.canReach(A, *getAnchorScope()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
} // namespace

/// -------------------- Undefined-Behavior Attributes ------------------------

namespace {
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef states that volatile stores are not UB, so skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should give to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violations for each
      // call site.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null
        // pointer and the corresponding argument position is known to have
        // the nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this call site is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        // (1) Not having a value means it is dead. (we can replace the value
        //     with undef)
        // (2) Simplified to undef. The argument violates the noundef
        //     attribute.
        // (3) Simplified to a null pointer where it is known to be nonnull.
        //     The argument is a poison value and violates the noundef
        //     attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      Optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
        return true;

      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
      //       We also ensure the return position is not "assumed dead"
      //       because the returned value was then potentially simplified to
      //       `undef` in AAReturnedValues without removing the `noundef`
      //       attribute yet.

      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
      // (1) The returned value is known to be undef.
      // (2) The value is known to be a null pointer and the returned
      //     position has the nonnull attribute (because the returned value
      //     is then poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        auto &NonNullAA = A.getAAFor<AANonNull>(
            *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
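    // Illustrative (hypothetical) IR for the return case handled above:
    //   define noundef nonnull ptr @f() { ret ptr null }
    // Returning null where nonnull is required yields poison, and poison at
    // a noundef return position makes the ret known UB.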
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words: if an instruction is not in the assumed-to-_not_-cause-
    // UB set, then it is assumed to cause UB (that includes those in the
    // KnownUBInsts set). The rest of the boilerplate is to ensure that it is
    // one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it), in which case
  ///    it is put in the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which, if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
} // namespace

/// ------------------------ Will-Return Attributes ----------------------------

namespace {
// Helper function that checks whether a function has any cycle which we don't
// know to be bounded.
// Loops with a maximum trip count are considered bounded; any other cycle is
// not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // any cycle to be an unbounded cycle.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the
  // maximal SCCs. To detect if there's a cycle, we only need to find the
  // maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
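  // For example (illustrative): a loop like "for (int i = 0; i < 100; ++i)"
  // has a constant max trip count SCEV can report, while
  // "while (p) p = p->next;" does not and is treated as unbounded.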
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function,
    // which might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    bool IsKnown;
    if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
      return IsKnown || !KnownOnly;
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);

    Function *F = getAnchorScope();
    if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};
} // namespace

/// -------------------AAReachability Attribute--------------------------

namespace {
struct AAReachabilityImpl : AAReachability {
  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
      : AAReachability(IRP, A) {}

  const std::string getAsStr() const override {
    // TODO: Return the number of reachable queries.
    return "reachable";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED);
    if (!NoRecurseAA.isAssumedNoRecurse())
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAReachabilityImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};
} // namespace

/// ------------------------ NoAlias Argument Attribute ------------------------

namespace {
struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
    assert(getAssociatedType()->isPointerTy() &&
           "Noalias is a pointer attribute");
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
3109 void initialize(Attributor &A) override { 3110 AANoAliasImpl::initialize(A); 3111 Value *Val = &getAssociatedValue(); 3112 do { 3113 CastInst *CI = dyn_cast<CastInst>(Val); 3114 if (!CI) 3115 break; 3116 Value *Base = CI->getOperand(0); 3117 if (!Base->hasOneUse()) 3118 break; 3119 Val = Base; 3120 } while (true); 3121 3122 if (!Val->getType()->isPointerTy()) { 3123 indicatePessimisticFixpoint(); 3124 return; 3125 } 3126 3127 if (isa<AllocaInst>(Val)) 3128 indicateOptimisticFixpoint(); 3129 else if (isa<ConstantPointerNull>(Val) && 3130 !NullPointerIsDefined(getAnchorScope(), 3131 Val->getType()->getPointerAddressSpace())) 3132 indicateOptimisticFixpoint(); 3133 else if (Val != &getAssociatedValue()) { 3134 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>( 3135 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL); 3136 if (ValNoAliasAA.isKnownNoAlias()) 3137 indicateOptimisticFixpoint(); 3138 } 3139 } 3140 3141 /// See AbstractAttribute::updateImpl(...). 3142 ChangeStatus updateImpl(Attributor &A) override { 3143 // TODO: Implement this. 3144 return indicatePessimisticFixpoint(); 3145 } 3146 3147 /// See AbstractAttribute::trackStatistics() 3148 void trackStatistics() const override { 3149 STATS_DECLTRACK_FLOATING_ATTR(noalias) 3150 } 3151 }; 3152 3153 /// NoAlias attribute for an argument. 3154 struct AANoAliasArgument final 3155 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 3156 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 3157 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3158 3159 /// See AbstractAttribute::initialize(...). 3160 void initialize(Attributor &A) override { 3161 Base::initialize(A); 3162 // See callsite argument attribute and callee argument attribute. 3163 if (hasAttr({Attribute::ByVal})) 3164 indicateOptimisticFixpoint(); 3165 } 3166 3167 /// See AbstractAttribute::update(...). 3168 ChangeStatus updateImpl(Attributor &A) override { 3169 // We have to make sure no-alias on the argument does not break 3170 // synchronization when this is a callback argument, see also [1] below. 3171 // If synchronization cannot be affected, we delegate to the base updateImpl 3172 // function, otherwise we give up for now. 3173 3174 // If the function is no-sync, no-alias cannot break synchronization. 3175 const auto &NoSyncAA = 3176 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), 3177 DepClassTy::OPTIONAL); 3178 if (NoSyncAA.isAssumedNoSync()) 3179 return Base::updateImpl(A); 3180 3181 // If the argument is read-only, no-alias cannot break synchronization. 3182 bool IsKnown; 3183 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 3184 return Base::updateImpl(A); 3185 3186 // If the argument is never passed through callbacks, no-alias cannot break 3187 // synchronization. 3188 bool UsedAssumedInformation = false; 3189 if (A.checkForAllCallSites( 3190 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 3191 true, UsedAssumedInformation)) 3192 return Base::updateImpl(A); 3193 3194 // TODO: add no-alias but make sure it doesn't break synchronization by 3195 // introducing fake uses. See: 3196 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. 
Finkel,
3197 // International Workshop on OpenMP 2018,
3198 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3199
3200 return indicatePessimisticFixpoint();
3201 }
3202
3203 /// See AbstractAttribute::trackStatistics()
3204 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3205 };
3206
3207 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3208 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3209 : AANoAliasImpl(IRP, A) {}
3210
3211 /// See AbstractAttribute::initialize(...).
3212 void initialize(Attributor &A) override {
3213 // See callsite argument attribute and callee argument attribute.
3214 const auto &CB = cast<CallBase>(getAnchorValue());
3215 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3216 indicateOptimisticFixpoint();
3217 Value &Val = getAssociatedValue();
3218 if (isa<ConstantPointerNull>(Val) &&
3219 !NullPointerIsDefined(getAnchorScope(),
3220 Val.getType()->getPointerAddressSpace()))
3221 indicateOptimisticFixpoint();
3222 }
3223
3224 /// Determine if the underlying value may alias with the call site argument
3225 /// \p OtherArgNo of \p CB (= the underlying call site).
3226 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3227 const AAMemoryBehavior &MemBehaviorAA,
3228 const CallBase &CB, unsigned OtherArgNo) {
3229 // We do not need to worry about aliasing with the underlying IRP.
3230 if (this->getCalleeArgNo() == (int)OtherArgNo)
3231 return false;
3232
3233 // If it is not a pointer or pointer vector we do not alias.
3234 const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3235 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3236 return false;
3237
3238 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3239 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3240
3241 // If the argument is readnone, there is no read-write aliasing.
3242 if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3243 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3244 return false;
3245 }
3246
3247 // If the argument is readonly and the underlying value is readonly, there
3248 // is no read-write aliasing.
3249 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3250 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3251 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3252 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3253 return false;
3254 }
3255
3256 // We have to utilize actual alias analysis queries so we need the object.
3257 if (!AAR)
3258 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3259
3260 // Try to rule it out at the call site.
3261 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3262 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3263 "callsite arguments: "
3264 << getAssociatedValue() << " " << *ArgOp << " => "
3265 << (IsAliasing ? "" : "no-") << "alias \n");
3266
3267 return IsAliasing;
3268 }
3269
3270 bool
3271 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3272 const AAMemoryBehavior &MemBehaviorAA,
3273 const AANoAlias &NoAliasAA) {
3274 // We can deduce "noalias" if the following conditions hold.
3275 // (i) Associated value is assumed to be noalias in the definition.
3276 // (ii) Associated value is assumed to be no-capture in all the uses
3277 // possibly executed before this callsite.
3278 // (iii) There is no other pointer argument which could alias with the
3279 // value.
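// As a sketch of the situation we are after (illustrative IR, names made
// up):
//   %p = call i8* @malloc(i64 4)    ; (i) noalias at the definition
//   call void @use(i8* %p, i8* %q)  ; (ii) %p is not captured before here
//                                   ; (iii) %q must not alias %p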
3280
3281 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3282 if (!AssociatedValueIsNoAliasAtDef) {
3283 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3284 << " is not no-alias at the definition\n");
3285 return false;
3286 }
3287
3288 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3289
3290 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3291 const Function *ScopeFn = VIRP.getAnchorScope();
3292 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3293 // Check whether the value is captured in the scope using AANoCapture.
3294 // Look at the CFG and check only uses possibly executed before this
3295 // callsite.
3296 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3297 Instruction *UserI = cast<Instruction>(U.getUser());
3298
3299 // If UserI is the current instruction and there is a single potential use
3300 // of the value in UserI we allow the use.
3301 // TODO: We should inspect the operands and allow those that cannot alias
3302 // with the value.
3303 if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3304 return true;
3305
3306 if (ScopeFn) {
3307 const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3308 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3309
3310 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3311 return true;
3312
3313 if (auto *CB = dyn_cast<CallBase>(UserI)) {
3314 if (CB->isArgOperand(&U)) {
3315
3316 unsigned ArgNo = CB->getArgOperandNo(&U);
3317
3318 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3319 *this, IRPosition::callsite_argument(*CB, ArgNo),
3320 DepClassTy::OPTIONAL);
3321
3322 if (NoCaptureAA.isAssumedNoCapture())
3323 return true;
3324 }
3325 }
3326 }
3327
3328 // For cases which can potentially have more users.
3329 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3330 isa<SelectInst>(U)) {
3331 Follow = true;
3332 return true;
3333 }
3334
3335 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3336 return false;
3337 };
3338
3339 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3340 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3341 LLVM_DEBUG(
3342 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3343 << " cannot be noalias as it is potentially captured\n");
3344 return false;
3345 }
3346 }
3347 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3348
3349 // Check there is no other pointer argument which could alias with the
3350 // value passed at this call site.
3351 // TODO: AbstractCallSite
3352 const auto &CB = cast<CallBase>(getAnchorValue());
3353 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3354 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3355 return false;
3356
3357 return true;
3358 }
3359
3360 /// See AbstractAttribute::updateImpl(...).
3361 ChangeStatus updateImpl(Attributor &A) override {
3362 // If the argument is readnone we are done as there are no accesses via the
3363 // argument.
3364 auto &MemBehaviorAA =
3365 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3366 if (MemBehaviorAA.isAssumedReadNone()) {
3367 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3368 return ChangeStatus::UNCHANGED;
3369 }
3370
3371 const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3372 const auto &NoAliasAA =
3373 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3374
3375 AAResults *AAR = nullptr;
3376 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3377 NoAliasAA)) {
3378 LLVM_DEBUG(
3379 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3380 return ChangeStatus::UNCHANGED;
3381 }
3382
3383 return indicatePessimisticFixpoint();
3384 }
3385
3386 /// See AbstractAttribute::trackStatistics()
3387 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3388 };
3389
3390 /// NoAlias attribute for function return value.
3391 struct AANoAliasReturned final : AANoAliasImpl {
3392 AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3393 : AANoAliasImpl(IRP, A) {}
3394
3395 /// See AbstractAttribute::initialize(...).
3396 void initialize(Attributor &A) override {
3397 AANoAliasImpl::initialize(A);
3398 Function *F = getAssociatedFunction();
3399 if (!F || F->isDeclaration())
3400 indicatePessimisticFixpoint();
3401 }
3402
3403 /// See AbstractAttribute::updateImpl(...).
3404 virtual ChangeStatus updateImpl(Attributor &A) override {
3405
3406 auto CheckReturnValue = [&](Value &RV) -> bool {
3407 if (Constant *C = dyn_cast<Constant>(&RV))
3408 if (C->isNullValue() || isa<UndefValue>(C))
3409 return true;
3410
3411 /// For now, we can only deduce noalias if we have call sites.
3412 /// FIXME: add more support.
3413 if (!isa<CallBase>(&RV))
3414 return false;
3415
3416 const IRPosition &RVPos = IRPosition::value(RV);
3417 const auto &NoAliasAA =
3418 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3419 if (!NoAliasAA.isAssumedNoAlias())
3420 return false;
3421
3422 const auto &NoCaptureAA =
3423 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3424 return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3425 };
3426
3427 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3428 return indicatePessimisticFixpoint();
3429
3430 return ChangeStatus::UNCHANGED;
3431 }
3432
3433 /// See AbstractAttribute::trackStatistics()
3434 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3435 };
3436
3437 /// NoAlias attribute deduction for a call site return value.
3438 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3439 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3440 : AANoAliasImpl(IRP, A) {}
3441
3442 /// See AbstractAttribute::initialize(...).
3443 void initialize(Attributor &A) override {
3444 AANoAliasImpl::initialize(A);
3445 Function *F = getAssociatedFunction();
3446 if (!F || F->isDeclaration())
3447 indicatePessimisticFixpoint();
3448 }
3449
3450 /// See AbstractAttribute::updateImpl(...).
3451 ChangeStatus updateImpl(Attributor &A) override {
3452 // TODO: Once we have call site specific value information we can provide
3453 // call site specific liveness information and then it makes
3454 // sense to specialize attributes for call site arguments instead of
3455 // redirecting requests to the callee argument.
3456 Function *F = getAssociatedFunction(); 3457 const IRPosition &FnPos = IRPosition::returned(*F); 3458 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3459 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3460 } 3461 3462 /// See AbstractAttribute::trackStatistics() 3463 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3464 }; 3465 } // namespace 3466 3467 /// -------------------AAIsDead Function Attribute----------------------- 3468 3469 namespace { 3470 struct AAIsDeadValueImpl : public AAIsDead { 3471 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3472 3473 /// See AbstractAttribute::initialize(...). 3474 void initialize(Attributor &A) override { 3475 if (auto *Scope = getAnchorScope()) 3476 if (!A.isRunOn(*Scope)) 3477 indicatePessimisticFixpoint(); 3478 } 3479 3480 /// See AAIsDead::isAssumedDead(). 3481 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3482 3483 /// See AAIsDead::isKnownDead(). 3484 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3485 3486 /// See AAIsDead::isAssumedDead(BasicBlock *). 3487 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3488 3489 /// See AAIsDead::isKnownDead(BasicBlock *). 3490 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3491 3492 /// See AAIsDead::isAssumedDead(Instruction *I). 3493 bool isAssumedDead(const Instruction *I) const override { 3494 return I == getCtxI() && isAssumedDead(); 3495 } 3496 3497 /// See AAIsDead::isKnownDead(Instruction *I). 3498 bool isKnownDead(const Instruction *I) const override { 3499 return isAssumedDead(I) && isKnownDead(); 3500 } 3501 3502 /// See AbstractAttribute::getAsStr(). 3503 virtual const std::string getAsStr() const override { 3504 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3505 } 3506 3507 /// Check if all uses are assumed dead. 3508 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3509 // Callers might not check the type, void has no uses. 3510 if (V.getType()->isVoidTy() || V.use_empty()) 3511 return true; 3512 3513 // If we replace a value with a constant there are no uses left afterwards. 3514 if (!isa<Constant>(V)) { 3515 if (auto *I = dyn_cast<Instruction>(&V)) 3516 if (!A.isRunOn(*I->getFunction())) 3517 return false; 3518 bool UsedAssumedInformation = false; 3519 Optional<Constant *> C = 3520 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3521 if (!C.hasValue() || *C) 3522 return true; 3523 } 3524 3525 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3526 // Explicitly set the dependence class to required because we want a long 3527 // chain of N dependent instructions to be considered live as soon as one is 3528 // without going through N update cycles. This is not required for 3529 // correctness. 3530 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3531 DepClassTy::REQUIRED); 3532 } 3533 3534 /// Determine if \p I is assumed to be side-effect free. 
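///
/// Trivially dead instructions qualify, and so do calls that are assumed
/// `nounwind` and (assumed) read-only, e.g. (illustrative IR):
///   %x = call i32 @pure(i32 %a) nounwind readonly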
3535 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3536 if (!I || wouldInstructionBeTriviallyDead(I))
3537 return true;
3538
3539 auto *CB = dyn_cast<CallBase>(I);
3540 if (!CB || isa<IntrinsicInst>(CB))
3541 return false;
3542
3543 const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3544 const auto &NoUnwindAA =
3545 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3546 if (!NoUnwindAA.isAssumedNoUnwind())
3547 return false;
3548 if (!NoUnwindAA.isKnownNoUnwind())
3549 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3550
3551 bool IsKnown;
3552 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3553 }
3554 };
3555
3556 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3557 AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3558 : AAIsDeadValueImpl(IRP, A) {}
3559
3560 /// See AbstractAttribute::initialize(...).
3561 void initialize(Attributor &A) override {
3562 AAIsDeadValueImpl::initialize(A);
3563
3564 if (isa<UndefValue>(getAssociatedValue())) {
3565 indicatePessimisticFixpoint();
3566 return;
3567 }
3568
3569 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3570 if (!isAssumedSideEffectFree(A, I)) {
3571 if (!isa_and_nonnull<StoreInst>(I))
3572 indicatePessimisticFixpoint();
3573 else
3574 removeAssumedBits(HAS_NO_EFFECT);
3575 }
3576 }
3577
3578 bool isDeadStore(Attributor &A, StoreInst &SI) {
3579 // The language reference states volatile stores are not UB/dead; skip them.
3580 if (SI.isVolatile())
3581 return false;
3582
3583 bool UsedAssumedInformation = false;
3584 SmallSetVector<Value *, 4> PotentialCopies;
3585 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3586 UsedAssumedInformation))
3587 return false;
3588 return llvm::all_of(PotentialCopies, [&](Value *V) {
3589 return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3590 UsedAssumedInformation);
3591 });
3592 }
3593
3594 /// See AbstractAttribute::getAsStr().
3595 const std::string getAsStr() const override {
3596 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3597 if (isa_and_nonnull<StoreInst>(I))
3598 if (isValidState())
3599 return "assumed-dead-store";
3600 return AAIsDeadValueImpl::getAsStr();
3601 }
3602
3603 /// See AbstractAttribute::updateImpl(...).
3604 ChangeStatus updateImpl(Attributor &A) override {
3605 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3606 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3607 if (!isDeadStore(A, *SI))
3608 return indicatePessimisticFixpoint();
3609 } else {
3610 if (!isAssumedSideEffectFree(A, I))
3611 return indicatePessimisticFixpoint();
3612 if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3613 return indicatePessimisticFixpoint();
3614 }
3615 return ChangeStatus::UNCHANGED;
3616 }
3617
3618 /// See AbstractAttribute::manifest(...).
3619 ChangeStatus manifest(Attributor &A) override {
3620 Value &V = getAssociatedValue();
3621 if (auto *I = dyn_cast<Instruction>(&V)) {
3622 // If we get here we know the users are all dead. We check
3623 // isAssumedSideEffectFree again because it might not hold anymore,
3624 // i.e., only the users are dead but the instruction (= a call) is
3625 // still needed.
3626 if (isa<StoreInst>(I) ||
3627 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3628 A.deleteAfterManifest(*I);
3629 return ChangeStatus::CHANGED;
3630 }
3631 }
3632 return ChangeStatus::UNCHANGED;
3633 }
3634
3635 /// See AbstractAttribute::trackStatistics()
3636 void trackStatistics() const override {
3637 STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3638 }
3639 };
3640
3641 struct AAIsDeadArgument : public AAIsDeadFloating {
3642 AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3643 : AAIsDeadFloating(IRP, A) {}
3644
3645 /// See AbstractAttribute::initialize(...).
3646 void initialize(Attributor &A) override {
3647 AAIsDeadFloating::initialize(A);
3648 if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3649 indicatePessimisticFixpoint();
3650 }
3651
3652 /// See AbstractAttribute::manifest(...).
3653 ChangeStatus manifest(Attributor &A) override {
3654 Argument &Arg = *getAssociatedArgument();
3655 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3656 if (A.registerFunctionSignatureRewrite(
3657 Arg, /* ReplacementTypes */ {},
3658 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3659 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3660 return ChangeStatus::CHANGED;
3661 }
3662 return ChangeStatus::UNCHANGED;
3663 }
3664
3665 /// See AbstractAttribute::trackStatistics()
3666 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3667 };
3668
3669 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3670 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3671 : AAIsDeadValueImpl(IRP, A) {}
3672
3673 /// See AbstractAttribute::initialize(...).
3674 void initialize(Attributor &A) override {
3675 AAIsDeadValueImpl::initialize(A);
3676 if (isa<UndefValue>(getAssociatedValue()))
3677 indicatePessimisticFixpoint();
3678 }
3679
3680 /// See AbstractAttribute::updateImpl(...).
3681 ChangeStatus updateImpl(Attributor &A) override {
3682 // TODO: Once we have call site specific value information we can provide
3683 // call site specific liveness information and then it makes
3684 // sense to specialize attributes for call site arguments instead of
3685 // redirecting requests to the callee argument.
3686 Argument *Arg = getAssociatedArgument();
3687 if (!Arg)
3688 return indicatePessimisticFixpoint();
3689 const IRPosition &ArgPos = IRPosition::argument(*Arg);
3690 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3691 return clampStateAndIndicateChange(getState(), ArgAA.getState());
3692 }
3693
3694 /// See AbstractAttribute::manifest(...).
3695 ChangeStatus manifest(Attributor &A) override {
3696 CallBase &CB = cast<CallBase>(getAnchorValue());
3697 Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3698 assert(!isa<UndefValue>(U.get()) &&
3699 "Expected undef values to be filtered out!");
3700 UndefValue &UV = *UndefValue::get(U->getType());
3701 if (A.changeUseAfterManifest(U, UV))
3702 return ChangeStatus::CHANGED;
3703 return ChangeStatus::UNCHANGED;
3704 }
3705
3706 /// See AbstractAttribute::trackStatistics()
3707 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3708 };
3709
3710 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3711 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3712 : AAIsDeadFloating(IRP, A) {}
3713
3714 /// See AAIsDead::isAssumedDead().
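///
/// For a call site return value both parts must hold: the returned value
/// has no live uses *and* the call itself is assumed side-effect free; the
/// latter is tracked separately in IsAssumedSideEffectFree below.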
3715 bool isAssumedDead() const override { 3716 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3717 } 3718 3719 /// See AbstractAttribute::initialize(...). 3720 void initialize(Attributor &A) override { 3721 AAIsDeadFloating::initialize(A); 3722 if (isa<UndefValue>(getAssociatedValue())) { 3723 indicatePessimisticFixpoint(); 3724 return; 3725 } 3726 3727 // We track this separately as a secondary state. 3728 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3729 } 3730 3731 /// See AbstractAttribute::updateImpl(...). 3732 ChangeStatus updateImpl(Attributor &A) override { 3733 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3734 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3735 IsAssumedSideEffectFree = false; 3736 Changed = ChangeStatus::CHANGED; 3737 } 3738 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3739 return indicatePessimisticFixpoint(); 3740 return Changed; 3741 } 3742 3743 /// See AbstractAttribute::trackStatistics() 3744 void trackStatistics() const override { 3745 if (IsAssumedSideEffectFree) 3746 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3747 else 3748 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3749 } 3750 3751 /// See AbstractAttribute::getAsStr(). 3752 const std::string getAsStr() const override { 3753 return isAssumedDead() 3754 ? "assumed-dead" 3755 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3756 } 3757 3758 private: 3759 bool IsAssumedSideEffectFree = true; 3760 }; 3761 3762 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3763 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3764 : AAIsDeadValueImpl(IRP, A) {} 3765 3766 /// See AbstractAttribute::updateImpl(...). 3767 ChangeStatus updateImpl(Attributor &A) override { 3768 3769 bool UsedAssumedInformation = false; 3770 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3771 {Instruction::Ret}, UsedAssumedInformation); 3772 3773 auto PredForCallSite = [&](AbstractCallSite ACS) { 3774 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3775 return false; 3776 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3777 }; 3778 3779 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3780 UsedAssumedInformation)) 3781 return indicatePessimisticFixpoint(); 3782 3783 return ChangeStatus::UNCHANGED; 3784 } 3785 3786 /// See AbstractAttribute::manifest(...). 3787 ChangeStatus manifest(Attributor &A) override { 3788 // TODO: Rewrite the signature to return void? 3789 bool AnyChange = false; 3790 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3791 auto RetInstPred = [&](Instruction &I) { 3792 ReturnInst &RI = cast<ReturnInst>(I); 3793 if (!isa<UndefValue>(RI.getReturnValue())) 3794 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3795 return true; 3796 }; 3797 bool UsedAssumedInformation = false; 3798 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3799 UsedAssumedInformation); 3800 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3801 } 3802 3803 /// See AbstractAttribute::trackStatistics() 3804 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3805 }; 3806 3807 struct AAIsDeadFunction : public AAIsDead { 3808 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3809 3810 /// See AbstractAttribute::initialize(...). 
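///
/// Liveness exploration starts optimistically at the first instruction of
/// the entry block; only blocks reached through edges that are assumed to
/// transfer control are marked live later on.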
3811 void initialize(Attributor &A) override { 3812 Function *F = getAnchorScope(); 3813 if (!F || F->isDeclaration() || !A.isRunOn(*F)) { 3814 indicatePessimisticFixpoint(); 3815 return; 3816 } 3817 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3818 assumeLive(A, F->getEntryBlock()); 3819 } 3820 3821 /// See AbstractAttribute::getAsStr(). 3822 const std::string getAsStr() const override { 3823 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3824 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3825 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3826 std::to_string(KnownDeadEnds.size()) + "]"; 3827 } 3828 3829 /// See AbstractAttribute::manifest(...). 3830 ChangeStatus manifest(Attributor &A) override { 3831 assert(getState().isValidState() && 3832 "Attempted to manifest an invalid state!"); 3833 3834 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3835 Function &F = *getAnchorScope(); 3836 3837 if (AssumedLiveBlocks.empty()) { 3838 A.deleteAfterManifest(F); 3839 return ChangeStatus::CHANGED; 3840 } 3841 3842 // Flag to determine if we can change an invoke to a call assuming the 3843 // callee is nounwind. This is not possible if the personality of the 3844 // function allows to catch asynchronous exceptions. 3845 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3846 3847 KnownDeadEnds.set_union(ToBeExploredFrom); 3848 for (const Instruction *DeadEndI : KnownDeadEnds) { 3849 auto *CB = dyn_cast<CallBase>(DeadEndI); 3850 if (!CB) 3851 continue; 3852 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3853 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3854 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3855 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3856 continue; 3857 3858 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3859 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3860 else 3861 A.changeToUnreachableAfterManifest( 3862 const_cast<Instruction *>(DeadEndI->getNextNode())); 3863 HasChanged = ChangeStatus::CHANGED; 3864 } 3865 3866 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3867 for (BasicBlock &BB : F) 3868 if (!AssumedLiveBlocks.count(&BB)) { 3869 A.deleteAfterManifest(BB); 3870 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3871 HasChanged = ChangeStatus::CHANGED; 3872 } 3873 3874 return HasChanged; 3875 } 3876 3877 /// See AbstractAttribute::updateImpl(...). 3878 ChangeStatus updateImpl(Attributor &A) override; 3879 3880 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3881 assert(From->getParent() == getAnchorScope() && 3882 To->getParent() == getAnchorScope() && 3883 "Used AAIsDead of the wrong function"); 3884 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To)); 3885 } 3886 3887 /// See AbstractAttribute::trackStatistics() 3888 void trackStatistics() const override {} 3889 3890 /// Returns true if the function is assumed dead. 3891 bool isAssumedDead() const override { return false; } 3892 3893 /// See AAIsDead::isKnownDead(). 3894 bool isKnownDead() const override { return false; } 3895 3896 /// See AAIsDead::isAssumedDead(BasicBlock *). 3897 bool isAssumedDead(const BasicBlock *BB) const override { 3898 assert(BB->getParent() == getAnchorScope() && 3899 "BB must be in the same anchor scope function."); 3900 3901 if (!getAssumed()) 3902 return false; 3903 return !AssumedLiveBlocks.count(BB); 3904 } 3905 3906 /// See AAIsDead::isKnownDead(BasicBlock *). 
3907 bool isKnownDead(const BasicBlock *BB) const override {
3908 return getKnown() && isAssumedDead(BB);
3909 }
3910
3911 /// See AAIsDead::isAssumedDead(Instruction *I).
3912 bool isAssumedDead(const Instruction *I) const override {
3913 assert(I->getParent()->getParent() == getAnchorScope() &&
3914 "Instruction must be in the same anchor scope function.");
3915
3916 if (!getAssumed())
3917 return false;
3918
3919 // If it is not in AssumedLiveBlocks then it is for sure dead.
3920 // Otherwise, it can still be after a noreturn call in a live block.
3921 if (!AssumedLiveBlocks.count(I->getParent()))
3922 return true;
3923
3924 // If it is not after a liveness barrier it is live.
3925 const Instruction *PrevI = I->getPrevNode();
3926 while (PrevI) {
3927 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3928 return true;
3929 PrevI = PrevI->getPrevNode();
3930 }
3931 return false;
3932 }
3933
3934 /// See AAIsDead::isKnownDead(Instruction *I).
3935 bool isKnownDead(const Instruction *I) const override {
3936 return getKnown() && isAssumedDead(I);
3937 }
3938
3939 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3940 /// that internal functions called from \p BB should now be looked at.
3941 bool assumeLive(Attributor &A, const BasicBlock &BB) {
3942 if (!AssumedLiveBlocks.insert(&BB).second)
3943 return false;
3944
3945 // We assume that all of BB is (probably) live now and if there are calls to
3946 // internal functions we will assume that those are now live as well. This
3947 // is a performance optimization for blocks with calls to a lot of internal
3948 // functions. It can however cause dead functions to be treated as live.
3949 for (const Instruction &I : BB)
3950 if (const auto *CB = dyn_cast<CallBase>(&I))
3951 if (const Function *F = CB->getCalledFunction())
3952 if (F->hasLocalLinkage())
3953 A.markLiveInternalFunction(*F);
3954 return true;
3955 }
3956
3957 /// Collection of instructions that need to be explored again, e.g., we
3958 /// did assume they do not transfer control to (one of their) successors.
3959 SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3960
3961 /// Collection of instructions that are known to not transfer control.
3962 SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3963
3964 /// Collection of all assumed live edges.
3965 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3966
3967 /// Collection of all assumed live BasicBlocks.
3968 DenseSet<const BasicBlock *> AssumedLiveBlocks;
3969 };
3970
3971 static bool
3972 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3973 AbstractAttribute &AA,
3974 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3975 const IRPosition &IPos = IRPosition::callsite_function(CB);
3976
3977 const auto &NoReturnAA =
3978 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3979 if (NoReturnAA.isAssumedNoReturn())
3980 return !NoReturnAA.isKnownNoReturn();
3981 if (CB.isTerminator())
3982 AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3983 else
3984 AliveSuccessors.push_back(CB.getNextNode());
3985 return false;
3986 }
3987
3988 static bool
3989 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3990 AbstractAttribute &AA,
3991 SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3992 bool UsedAssumedInformation =
3993 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3994
3995 // First, determine if we can change an invoke to a call assuming the
3996 // callee is nounwind.
This is not possible if the personality of the 3997 // function allows to catch asynchronous exceptions. 3998 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3999 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 4000 } else { 4001 const IRPosition &IPos = IRPosition::callsite_function(II); 4002 const auto &AANoUnw = 4003 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); 4004 if (AANoUnw.isAssumedNoUnwind()) { 4005 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 4006 } else { 4007 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 4008 } 4009 } 4010 return UsedAssumedInformation; 4011 } 4012 4013 static bool 4014 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 4015 AbstractAttribute &AA, 4016 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 4017 bool UsedAssumedInformation = false; 4018 if (BI.getNumSuccessors() == 1) { 4019 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 4020 } else { 4021 Optional<Constant *> C = 4022 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); 4023 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 4024 // No value yet, assume both edges are dead. 4025 } else if (isa_and_nonnull<ConstantInt>(*C)) { 4026 const BasicBlock *SuccBB = 4027 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); 4028 AliveSuccessors.push_back(&SuccBB->front()); 4029 } else { 4030 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 4031 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 4032 UsedAssumedInformation = false; 4033 } 4034 } 4035 return UsedAssumedInformation; 4036 } 4037 4038 static bool 4039 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 4040 AbstractAttribute &AA, 4041 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 4042 bool UsedAssumedInformation = false; 4043 Optional<Constant *> C = 4044 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); 4045 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 4046 // No value yet, assume all edges are dead. 4047 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) { 4048 for (auto &CaseIt : SI.cases()) { 4049 if (CaseIt.getCaseValue() == C.getValue()) { 4050 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 4051 return UsedAssumedInformation; 4052 } 4053 } 4054 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 4055 return UsedAssumedInformation; 4056 } else { 4057 for (const BasicBlock *SuccBB : successors(SI.getParent())) 4058 AliveSuccessors.push_back(&SuccBB->front()); 4059 } 4060 return UsedAssumedInformation; 4061 } 4062 4063 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 4064 ChangeStatus Change = ChangeStatus::UNCHANGED; 4065 4066 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 4067 << getAnchorScope()->size() << "] BBs and " 4068 << ToBeExploredFrom.size() << " exploration points and " 4069 << KnownDeadEnds.size() << " known dead ends\n"); 4070 4071 // Copy and clear the list of instructions we need to explore from. It is 4072 // refilled with instructions the next update has to look at. 
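// Roughly: from each exploration point we fast forward to the next
// terminator or call and ask the relevant AAs (noreturn, nounwind, a
// constant branch/switch condition) which successors can be alive;
// instructions whose answer relied on assumed information are revisited.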
4073 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4074 ToBeExploredFrom.end());
4075 decltype(ToBeExploredFrom) NewToBeExploredFrom;
4076
4077 SmallVector<const Instruction *, 8> AliveSuccessors;
4078 while (!Worklist.empty()) {
4079 const Instruction *I = Worklist.pop_back_val();
4080 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4081
4082 // Fast forward for uninteresting instructions. We could look for UB here
4083 // though.
4084 while (!I->isTerminator() && !isa<CallBase>(I))
4085 I = I->getNextNode();
4086
4087 AliveSuccessors.clear();
4088
4089 bool UsedAssumedInformation = false;
4090 switch (I->getOpcode()) {
4091 // TODO: look for (assumed) UB to backwards propagate "deadness".
4092 default:
4093 assert(I->isTerminator() &&
4094 "Expected non-terminators to be handled already!");
4095 for (const BasicBlock *SuccBB : successors(I->getParent()))
4096 AliveSuccessors.push_back(&SuccBB->front());
4097 break;
4098 case Instruction::Call:
4099 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4100 *this, AliveSuccessors);
4101 break;
4102 case Instruction::Invoke:
4103 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4104 *this, AliveSuccessors);
4105 break;
4106 case Instruction::Br:
4107 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4108 *this, AliveSuccessors);
4109 break;
4110 case Instruction::Switch:
4111 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4112 *this, AliveSuccessors);
4113 break;
4114 }
4115
4116 if (UsedAssumedInformation) {
4117 NewToBeExploredFrom.insert(I);
4118 } else if (AliveSuccessors.empty() ||
4119 (I->isTerminator() &&
4120 AliveSuccessors.size() < I->getNumSuccessors())) {
4121 if (KnownDeadEnds.insert(I))
4122 Change = ChangeStatus::CHANGED;
4123 }
4124
4125 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4126 << AliveSuccessors.size() << " UsedAssumedInformation: "
4127 << UsedAssumedInformation << "\n");
4128
4129 for (const Instruction *AliveSuccessor : AliveSuccessors) {
4130 if (!I->isTerminator()) {
4131 assert(AliveSuccessors.size() == 1 &&
4132 "Non-terminator expected to have a single successor!");
4133 Worklist.push_back(AliveSuccessor);
4134 } else {
4135 // Record the assumed live edge.
4136 auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4137 if (AssumedLiveEdges.insert(Edge).second)
4138 Change = ChangeStatus::CHANGED;
4139 if (assumeLive(A, *AliveSuccessor->getParent()))
4140 Worklist.push_back(AliveSuccessor);
4141 }
4142 }
4143 }
4144
4145 // Check if the content of ToBeExploredFrom changed; ignore the order.
4146 if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4147 llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4148 return !ToBeExploredFrom.count(I);
4149 })) {
4150 Change = ChangeStatus::CHANGED;
4151 ToBeExploredFrom = std::move(NewToBeExploredFrom);
4152 }
4153
4154 // If we know everything is live there is no need to query for liveness.
4155 // Instead, indicating a pessimistic fixpoint will cause the state to be
4156 // "invalid" and all queries to be answered conservatively without lookups.
4157 // To be in this state we have to (1) finish the exploration, (2) not have
4158 // ruled any unreachable code dead, and (3) not have discovered any
4159 // non-trivial dead end.
4160 if (ToBeExploredFrom.empty() &&
4161 getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4162 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4163 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4164 }))
4165 return indicatePessimisticFixpoint();
4166 return Change;
4167 }
4168
4169 /// Liveness information for a call site.
4170 struct AAIsDeadCallSite final : AAIsDeadFunction {
4171 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4172 : AAIsDeadFunction(IRP, A) {}
4173
4174 /// See AbstractAttribute::initialize(...).
4175 void initialize(Attributor &A) override {
4176 // TODO: Once we have call site specific value information we can provide
4177 // call site specific liveness information and then it makes
4178 // sense to specialize attributes for call sites instead of
4179 // redirecting requests to the callee.
4180 llvm_unreachable("Abstract attributes for liveness are not "
4181 "supported for call sites yet!");
4182 }
4183
4184 /// See AbstractAttribute::updateImpl(...).
4185 ChangeStatus updateImpl(Attributor &A) override {
4186 return indicatePessimisticFixpoint();
4187 }
4188
4189 /// See AbstractAttribute::trackStatistics()
4190 void trackStatistics() const override {}
4191 };
4192 } // namespace
4193
4194 /// -------------------- Dereferenceable Argument Attribute --------------------
4195
4196 namespace {
4197 struct AADereferenceableImpl : AADereferenceable {
4198 AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4199 : AADereferenceable(IRP, A) {}
4200 using StateType = DerefState;
4201
4202 /// See AbstractAttribute::initialize(...).
4203 void initialize(Attributor &A) override {
4204 SmallVector<Attribute, 4> Attrs;
4205 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4206 Attrs, /* IgnoreSubsumingPositions */ false, &A);
4207 for (const Attribute &Attr : Attrs)
4208 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4209
4210 const IRPosition &IRP = this->getIRPosition();
4211 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4212
4213 bool CanBeNull, CanBeFreed;
4214 takeKnownDerefBytesMaximum(
4215 IRP.getAssociatedValue().getPointerDereferenceableBytes(
4216 A.getDataLayout(), CanBeNull, CanBeFreed));
4217
4218 bool IsFnInterface = IRP.isFnInterfaceKind();
4219 Function *FnScope = IRP.getAnchorScope();
4220 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4221 indicatePessimisticFixpoint();
4222 return;
4223 }
4224
4225 if (Instruction *CtxI = getCtxI())
4226 followUsesInMBEC(*this, A, getState(), *CtxI);
4227 }
4228
4229 /// See AbstractAttribute::getState()
4230 /// {
4231 StateType &getState() override { return *this; }
4232 const StateType &getState() const override { return *this; }
4233 /// }
4234
4235 /// Helper function for collecting accessed bytes in must-be-executed-context
4236 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4237 DerefState &State) {
4238 const Value *UseV = U->get();
4239 if (!UseV->getType()->isPointerTy())
4240 return;
4241
4242 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4243 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4244 return;
4245
4246 int64_t Offset;
4247 const Value *Base = GetPointerBaseWithConstantOffset(
4248 Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4249 if (Base && Base == &getAssociatedValue())
4250 State.addAccessedBytes(Offset, Loc->Size.getValue());
4251 }
4252
4253 /// See followUsesInMBEC
4254
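///
/// For example (illustrative IR), a use like
///   %v = load i64, i64* %p
/// in the must-be-executed context of %p proves at least 8 dereferenceable
/// bytes for %p; further accesses at constant offsets can extend that.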
bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4255 AADereferenceable::StateType &State) { 4256 bool IsNonNull = false; 4257 bool TrackUse = false; 4258 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4259 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4260 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4261 << " for instruction " << *I << "\n"); 4262 4263 addAccessedBytesForUse(A, U, I, State); 4264 State.takeKnownDerefBytesMaximum(DerefBytes); 4265 return TrackUse; 4266 } 4267 4268 /// See AbstractAttribute::manifest(...). 4269 ChangeStatus manifest(Attributor &A) override { 4270 ChangeStatus Change = AADereferenceable::manifest(A); 4271 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 4272 removeAttrs({Attribute::DereferenceableOrNull}); 4273 return ChangeStatus::CHANGED; 4274 } 4275 return Change; 4276 } 4277 4278 void getDeducedAttributes(LLVMContext &Ctx, 4279 SmallVectorImpl<Attribute> &Attrs) const override { 4280 // TODO: Add *_globally support 4281 if (isAssumedNonNull()) 4282 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 4283 Ctx, getAssumedDereferenceableBytes())); 4284 else 4285 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 4286 Ctx, getAssumedDereferenceableBytes())); 4287 } 4288 4289 /// See AbstractAttribute::getAsStr(). 4290 const std::string getAsStr() const override { 4291 if (!getAssumedDereferenceableBytes()) 4292 return "unknown-dereferenceable"; 4293 return std::string("dereferenceable") + 4294 (isAssumedNonNull() ? "" : "_or_null") + 4295 (isAssumedGlobal() ? "_globally" : "") + "<" + 4296 std::to_string(getKnownDereferenceableBytes()) + "-" + 4297 std::to_string(getAssumedDereferenceableBytes()) + ">"; 4298 } 4299 }; 4300 4301 /// Dereferenceable attribute for a floating value. 4302 struct AADereferenceableFloating : AADereferenceableImpl { 4303 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 4304 : AADereferenceableImpl(IRP, A) {} 4305 4306 /// See AbstractAttribute::updateImpl(...). 4307 ChangeStatus updateImpl(Attributor &A) override { 4308 const DataLayout &DL = A.getDataLayout(); 4309 4310 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 4311 bool Stripped) -> bool { 4312 unsigned IdxWidth = 4313 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 4314 APInt Offset(IdxWidth, 0); 4315 const Value *Base = stripAndAccumulateOffsets( 4316 A, *this, &V, DL, Offset, /* GetMinOffset */ false, 4317 /* AllowNonInbounds */ true); 4318 4319 const auto &AA = A.getAAFor<AADereferenceable>( 4320 *this, IRPosition::value(*Base), DepClassTy::REQUIRED); 4321 int64_t DerefBytes = 0; 4322 if (!Stripped && this == &AA) { 4323 // Use IR information if we did not strip anything. 4324 // TODO: track globally. 4325 bool CanBeNull, CanBeFreed; 4326 DerefBytes = 4327 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 4328 T.GlobalState.indicatePessimisticFixpoint(); 4329 } else { 4330 const DerefState &DS = AA.getState(); 4331 DerefBytes = DS.DerefBytesState.getAssumed(); 4332 T.GlobalState &= DS.GlobalState; 4333 } 4334 4335 // For now we do not try to "increase" dereferenceability due to negative 4336 // indices as we first have to come up with code to deal with loops and 4337 // for overflows of the dereferenceable bytes. 
4338 int64_t OffsetSExt = Offset.getSExtValue();
4339 if (OffsetSExt < 0)
4340 OffsetSExt = 0;
4341
4342 T.takeAssumedDerefBytesMinimum(
4343 std::max(int64_t(0), DerefBytes - OffsetSExt));
4344
4345 if (this == &AA) {
4346 if (!Stripped) {
4347 // If nothing was stripped, IR information is all we got.
4348 T.takeKnownDerefBytesMaximum(
4349 std::max(int64_t(0), DerefBytes - OffsetSExt));
4350 T.indicatePessimisticFixpoint();
4351 } else if (OffsetSExt > 0) {
4352 // If something was stripped but there is circular reasoning we look
4353 // at the offset. If it is positive we basically decrease the
4354 // dereferenceable bytes in a circular loop now, which will simply
4355 // drive them down to the known value in a very slow way which we
4356 // can accelerate.
4357 T.indicatePessimisticFixpoint();
4358 }
4359 }
4360
4361 return T.isValidState();
4362 };
4363
4364 DerefState T;
4365 bool UsedAssumedInformation = false;
4366 if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4367 VisitValueCB, getCtxI(),
4368 UsedAssumedInformation))
4369 return indicatePessimisticFixpoint();
4370
4371 return clampStateAndIndicateChange(getState(), T);
4372 }
4373
4374 /// See AbstractAttribute::trackStatistics()
4375 void trackStatistics() const override {
4376 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4377 }
4378 };
4379
4380 /// Dereferenceable attribute for a return value.
4381 struct AADereferenceableReturned final
4382 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4383 AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4384 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4385 IRP, A) {}
4386
4387 /// See AbstractAttribute::trackStatistics()
4388 void trackStatistics() const override {
4389 STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4390 }
4391 };
4392
4393 /// Dereferenceable attribute for an argument.
4394 struct AADereferenceableArgument final
4395 : AAArgumentFromCallSiteArguments<AADereferenceable,
4396 AADereferenceableImpl> {
4397 using Base =
4398 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4399 AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4400 : Base(IRP, A) {}
4401
4402 /// See AbstractAttribute::trackStatistics()
4403 void trackStatistics() const override {
4404 STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4405 }
4406 };
4407
4408 /// Dereferenceable attribute for a call site argument.
4409 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4410 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4411 : AADereferenceableFloating(IRP, A) {}
4412
4413 /// See AbstractAttribute::trackStatistics()
4414 void trackStatistics() const override {
4415 STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4416 }
4417 };
4418
4419 /// Dereferenceable attribute deduction for a call site return value.
4420 struct AADereferenceableCallSiteReturned final 4421 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4422 using Base = 4423 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4424 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4425 : Base(IRP, A) {} 4426 4427 /// See AbstractAttribute::trackStatistics() 4428 void trackStatistics() const override { 4429 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4430 } 4431 }; 4432 } // namespace 4433 4434 // ------------------------ Align Argument Attribute ------------------------ 4435 4436 namespace { 4437 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4438 Value &AssociatedValue, const Use *U, 4439 const Instruction *I, bool &TrackUse) { 4440 // We need to follow common pointer manipulation uses to the accesses they 4441 // feed into. 4442 if (isa<CastInst>(I)) { 4443 // Follow all but ptr2int casts. 4444 TrackUse = !isa<PtrToIntInst>(I); 4445 return 0; 4446 } 4447 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4448 if (GEP->hasAllConstantIndices()) 4449 TrackUse = true; 4450 return 0; 4451 } 4452 4453 MaybeAlign MA; 4454 if (const auto *CB = dyn_cast<CallBase>(I)) { 4455 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4456 return 0; 4457 4458 unsigned ArgNo = CB->getArgOperandNo(U); 4459 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4460 // As long as we only use known information there is no need to track 4461 // dependences here. 4462 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4463 MA = MaybeAlign(AlignAA.getKnownAlign()); 4464 } 4465 4466 const DataLayout &DL = A.getDataLayout(); 4467 const Value *UseV = U->get(); 4468 if (auto *SI = dyn_cast<StoreInst>(I)) { 4469 if (SI->getPointerOperand() == UseV) 4470 MA = SI->getAlign(); 4471 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4472 if (LI->getPointerOperand() == UseV) 4473 MA = LI->getAlign(); 4474 } 4475 4476 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4477 return 0; 4478 4479 unsigned Alignment = MA->value(); 4480 int64_t Offset; 4481 4482 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4483 if (Base == &AssociatedValue) { 4484 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4485 // So we can say that the maximum power of two which is a divisor of 4486 // gcd(Offset, Alignment) is an alignment. 4487 4488 uint32_t gcd = 4489 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4490 Alignment = llvm::PowerOf2Floor(gcd); 4491 } 4492 } 4493 4494 return Alignment; 4495 } 4496 4497 struct AAAlignImpl : AAAlign { 4498 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4499 4500 /// See AbstractAttribute::initialize(...). 4501 void initialize(Attributor &A) override { 4502 SmallVector<Attribute, 4> Attrs; 4503 getAttrs({Attribute::Alignment}, Attrs); 4504 for (const Attribute &Attr : Attrs) 4505 takeKnownMaximum(Attr.getValueAsInt()); 4506 4507 Value &V = getAssociatedValue(); 4508 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4509 4510 if (getIRPosition().isFnInterfaceKind() && 4511 (!getAnchorScope() || 4512 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4513 indicatePessimisticFixpoint(); 4514 return; 4515 } 4516 4517 if (Instruction *CtxI = getCtxI()) 4518 followUsesInMBEC(*this, A, getState(), *CtxI); 4519 } 4520 4521 /// See AbstractAttribute::manifest(...). 
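///
/// Besides manifesting the attribute itself, this upgrades the alignment
/// annotations of loads and stores of the associated value, e.g. turning
///   %v = load i32, i32* %p, align 1
/// into `align 4` once a 4-byte alignment is assumed (illustrative IR).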
4522 ChangeStatus manifest(Attributor &A) override {
4523 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4524
4525 // Check for users that allow alignment annotations.
4526 Value &AssociatedValue = getAssociatedValue();
4527 for (const Use &U : AssociatedValue.uses()) {
4528 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4529 if (SI->getPointerOperand() == &AssociatedValue)
4530 if (SI->getAlignment() < getAssumedAlign()) {
4531 STATS_DECLTRACK(AAAlign, Store,
4532 "Number of times alignment added to a store");
4533 SI->setAlignment(Align(getAssumedAlign()));
4534 LoadStoreChanged = ChangeStatus::CHANGED;
4535 }
4536 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4537 if (LI->getPointerOperand() == &AssociatedValue)
4538 if (LI->getAlignment() < getAssumedAlign()) {
4539 LI->setAlignment(Align(getAssumedAlign()));
4540 STATS_DECLTRACK(AAAlign, Load,
4541 "Number of times alignment added to a load");
4542 LoadStoreChanged = ChangeStatus::CHANGED;
4543 }
4544 }
4545 }
4546
4547 ChangeStatus Changed = AAAlign::manifest(A);
4548
4549 Align InheritAlign =
4550 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4551 if (InheritAlign >= getAssumedAlign())
4552 return LoadStoreChanged;
4553 return Changed | LoadStoreChanged;
4554 }
4555
4556 // TODO: Provide a helper to determine the implied ABI alignment and check,
4557 // in the existing manifest method and in a new one for AAAlignImpl, against
4558 // that value to avoid making the alignment explicit if it did not improve.
4559
4560 /// See AbstractAttribute::getDeducedAttributes
4561 virtual void
4562 getDeducedAttributes(LLVMContext &Ctx,
4563 SmallVectorImpl<Attribute> &Attrs) const override {
4564 if (getAssumedAlign() > 1)
4565 Attrs.emplace_back(
4566 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4567 }
4568
4569 /// See followUsesInMBEC
4570 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4571 AAAlign::StateType &State) {
4572 bool TrackUse = false;
4573
4574 unsigned int KnownAlign =
4575 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4576 State.takeKnownMaximum(KnownAlign);
4577
4578 return TrackUse;
4579 }
4580
4581 /// See AbstractAttribute::getAsStr().
4582 const std::string getAsStr() const override {
4583 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4584 "-" + std::to_string(getAssumedAlign()) + ">")
4585 : "unknown-align";
4586 }
4587 };
4588
4589 /// Align attribute for a floating value.
4590 struct AAAlignFloating : AAAlignImpl {
4591 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4592
4593 /// See AbstractAttribute::updateImpl(...).
4594 ChangeStatus updateImpl(Attributor &A) override {
4595 const DataLayout &DL = A.getDataLayout();
4596
4597 auto VisitValueCB = [&](Value &V, const Instruction *,
4598 AAAlign::StateType &T, bool Stripped) -> bool {
4599 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4600 return true;
4601 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4602 DepClassTy::REQUIRED);
4603 if (!Stripped && this == &AA) {
4604 int64_t Offset;
4605 unsigned Alignment = 1;
4606 if (const Value *Base =
4607 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4608 // TODO: Use AAAlign for the base too.
4609 Align PA = Base->getPointerAlignment(DL);
4610 // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4611 // So we can say that the maximum power of two which is a divisor of
4612 // gcd(Offset, Alignment) is an alignment.
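// For example (numbers illustrative): Offset = 20 with a base alignment
// of 8 gives gcd(20, 8) = 4, so `Base + 20` is at least 4-byte aligned.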
4613
4614 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4615 uint32_t(PA.value()));
4616 Alignment = llvm::PowerOf2Floor(gcd);
4617 } else {
4618 Alignment = V.getPointerAlignment(DL).value();
4619 }
4620 // Use only IR information if we did not strip anything.
4621 T.takeKnownMaximum(Alignment);
4622 T.indicatePessimisticFixpoint();
4623 } else {
4624 // Use abstract attribute information.
4625 const AAAlign::StateType &DS = AA.getState();
4626 T ^= DS;
4627 }
4628 return T.isValidState();
4629 };
4630
4631 StateType T;
4632 bool UsedAssumedInformation = false;
4633 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4634 VisitValueCB, getCtxI(),
4635 UsedAssumedInformation))
4636 return indicatePessimisticFixpoint();
4637
4638 // TODO: If we know we visited all incoming values, and thus none are
4639 // assumed dead, we can take the known information from the state T.
4640 return clampStateAndIndicateChange(getState(), T);
4641 }
4642
4643 /// See AbstractAttribute::trackStatistics()
4644 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4645 };
4646
4647 /// Align attribute for function return value.
4648 struct AAAlignReturned final
4649 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4650 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4651 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4652
4653 /// See AbstractAttribute::initialize(...).
4654 void initialize(Attributor &A) override {
4655 Base::initialize(A);
4656 Function *F = getAssociatedFunction();
4657 if (!F || F->isDeclaration())
4658 indicatePessimisticFixpoint();
4659 }
4660
4661 /// See AbstractAttribute::trackStatistics()
4662 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4663 };
4664
4665 /// Align attribute for function argument.
4666 struct AAAlignArgument final
4667 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4668 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4669 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4670
4671 /// See AbstractAttribute::manifest(...).
4672 ChangeStatus manifest(Attributor &A) override {
4673 // If the associated argument is involved in a must-tail call we give up
4674 // because we would need to keep the argument alignments of caller and
4675 // callee in sync. It just does not seem worth the trouble right now.
4676 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4677 return ChangeStatus::UNCHANGED;
4678 return Base::manifest(A);
4679 }
4680
4681 /// See AbstractAttribute::trackStatistics()
4682 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4683 };
4684
4685 struct AAAlignCallSiteArgument final : AAAlignFloating {
4686 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4687 : AAAlignFloating(IRP, A) {}
4688
4689 /// See AbstractAttribute::manifest(...).
4690 ChangeStatus manifest(Attributor &A) override {
4691 // If the associated argument is involved in a must-tail call we give up
4692 // because we would need to keep the argument alignments of caller and
4693 // callee in sync. It just does not seem worth the trouble right now.
4694 if (Argument *Arg = getAssociatedArgument())
4695 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4696 return ChangeStatus::UNCHANGED;
4697 ChangeStatus Changed = AAAlignImpl::manifest(A);
4698 Align InheritAlign =
4699 getAssociatedValue().getPointerAlignment(A.getDataLayout());
4700 if (InheritAlign >= getAssumedAlign())
4701 Changed = ChangeStatus::UNCHANGED;
4702 return Changed;
4703 }
4704
4705 /// See AbstractAttribute::updateImpl(Attributor &A).
4706 ChangeStatus updateImpl(Attributor &A) override {
4707 ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4708 if (Argument *Arg = getAssociatedArgument()) {
4709 // We only take known information from the argument
4710 // so we do not need to track a dependence.
4711 const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4712 *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4713 takeKnownMaximum(ArgAlignAA.getKnownAlign());
4714 }
4715 return Changed;
4716 }
4717
4718 /// See AbstractAttribute::trackStatistics()
4719 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4720 };
4721
4722 /// Align attribute deduction for a call site return value.
4723 struct AAAlignCallSiteReturned final
4724 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4725 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4726 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4727 : Base(IRP, A) {}
4728
4729 /// See AbstractAttribute::initialize(...).
4730 void initialize(Attributor &A) override {
4731 Base::initialize(A);
4732 Function *F = getAssociatedFunction();
4733 if (!F || F->isDeclaration())
4734 indicatePessimisticFixpoint();
4735 }
4736
4737 /// See AbstractAttribute::trackStatistics()
4738 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4739 };
4740 } // namespace
4741
4742 /// ------------------ Function No-Return Attribute ----------------------------
4743 namespace {
4744 struct AANoReturnImpl : public AANoReturn {
4745 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4746
4747 /// See AbstractAttribute::initialize(...).
4748 void initialize(Attributor &A) override {
4749 AANoReturn::initialize(A);
4750 Function *F = getAssociatedFunction();
4751 if (!F || F->isDeclaration())
4752 indicatePessimisticFixpoint();
4753 }
4754
4755 /// See AbstractAttribute::getAsStr().
4756 const std::string getAsStr() const override {
4757 return getAssumed() ? "noreturn" : "may-return";
4758 }
4759
4760 /// See AbstractAttribute::updateImpl(Attributor &A).
4761 virtual ChangeStatus updateImpl(Attributor &A) override {
4762 auto CheckForNoReturn = [](Instruction &) { return false; };
4763 bool UsedAssumedInformation = false;
4764 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4765 {(unsigned)Instruction::Ret},
4766 UsedAssumedInformation))
4767 return indicatePessimisticFixpoint();
4768 return ChangeStatus::UNCHANGED;
4769 }
4770 };
4771
4772 struct AANoReturnFunction final : AANoReturnImpl {
4773 AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4774 : AANoReturnImpl(IRP, A) {}
4775
4776 /// See AbstractAttribute::trackStatistics()
4777 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4778 };
4779
4780 /// NoReturn attribute deduction for a call site.
4781 struct AANoReturnCallSite final : AANoReturnImpl {
4782 AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4783 : AANoReturnImpl(IRP, A) {}
4784
4785 /// See AbstractAttribute::initialize(...).
4786 void initialize(Attributor &A) override {
4787 AANoReturnImpl::initialize(A);
4788 if (Function *F = getAssociatedFunction()) {
4789 const IRPosition &FnPos = IRPosition::function(*F);
4790 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4791 if (!FnAA.isAssumedNoReturn())
4792 indicatePessimisticFixpoint();
4793 }
4794 }
4795
4796 /// See AbstractAttribute::updateImpl(...).
4797 ChangeStatus updateImpl(Attributor &A) override {
4798 // TODO: Once we have call site specific value information we can provide
4799 // call site specific liveness information and then it makes
4800 // sense to specialize attributes for call site arguments instead of
4801 // redirecting requests to the callee argument.
4802 Function *F = getAssociatedFunction();
4803 const IRPosition &FnPos = IRPosition::function(*F);
4804 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4805 return clampStateAndIndicateChange(getState(), FnAA.getState());
4806 }
4807
4808 /// See AbstractAttribute::trackStatistics()
4809 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4810 };
4811 } // namespace
4812
4813 /// ----------------------- Variable Capturing ---------------------------------
4814
4815 namespace {
4816 /// A class to hold the state for no-capture attributes.
4817 struct AANoCaptureImpl : public AANoCapture {
4818 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4819
4820 /// See AbstractAttribute::initialize(...).
4821 void initialize(Attributor &A) override {
4822 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4823 indicateOptimisticFixpoint();
4824 return;
4825 }
4826 Function *AnchorScope = getAnchorScope();
4827 if (isFnInterfaceKind() &&
4828 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4829 indicatePessimisticFixpoint();
4830 return;
4831 }
4832
4833 // You cannot "capture" null in the default address space.
4834 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4835 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4836 indicateOptimisticFixpoint();
4837 return;
4838 }
4839
4840 const Function *F =
4841 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4842
4843 // Check what state the associated function can actually capture.
4844 if (F)
4845 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4846 else
4847 indicatePessimisticFixpoint();
4848 }
4849
4850 /// See AbstractAttribute::updateImpl(...).
4851 ChangeStatus updateImpl(Attributor &A) override;
4852
4853 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4854 virtual void
4855 getDeducedAttributes(LLVMContext &Ctx,
4856 SmallVectorImpl<Attribute> &Attrs) const override {
4857 if (!isAssumedNoCaptureMaybeReturned())
4858 return;
4859
4860 if (isArgumentPosition()) {
4861 if (isAssumedNoCapture())
4862 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4863 else if (ManifestInternal)
4864 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4865 }
4866 }
4867
4868 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4869 /// depending on the ability of the function associated with \p IRP to capture
4870 /// state in memory and through "returning/throwing", respectively.
4871 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4872 const Function &F,
4873 BitIntegerState &State) {
4874 // TODO: Once we have memory behavior attributes we should use them here.
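//
// In the checks below a pointer can escape through three routes: by being
// written to memory, by being returned or thrown, or by being turned into
// an integer. The NOT_CAPTURED_IN_MEM/RET bits record which of these routes
// are already ruled out by what the function is able to do.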
4875
4876 // If we know we cannot communicate or write to memory, we do not care about
4877 // ptr2int anymore.
4878 if (F.onlyReadsMemory() && F.doesNotThrow() &&
4879 F.getReturnType()->isVoidTy()) {
4880 State.addKnownBits(NO_CAPTURE);
4881 return;
4882 }
4883
4884 // A function cannot capture state in memory if it only reads memory; it can
4885 // however return/throw state and the state might be influenced by the
4886 // pointer value, e.g., loading from a returned pointer might reveal a bit.
4887 if (F.onlyReadsMemory())
4888 State.addKnownBits(NOT_CAPTURED_IN_MEM);
4889
4890 // A function cannot communicate state back if it does not throw
4891 // exceptions and does not return values.
4892 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4893 State.addKnownBits(NOT_CAPTURED_IN_RET);
4894
4895 // Check existing "returned" attributes.
4896 int ArgNo = IRP.getCalleeArgNo();
4897 if (F.doesNotThrow() && ArgNo >= 0) {
4898 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4899 if (F.hasParamAttribute(u, Attribute::Returned)) {
4900 if (u == unsigned(ArgNo))
4901 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4902 else if (F.onlyReadsMemory())
4903 State.addKnownBits(NO_CAPTURE);
4904 else
4905 State.addKnownBits(NOT_CAPTURED_IN_RET);
4906 break;
4907 }
4908 }
4909 }
4910
4911 /// See AbstractState::getAsStr().
4912 const std::string getAsStr() const override {
4913 if (isKnownNoCapture())
4914 return "known not-captured";
4915 if (isAssumedNoCapture())
4916 return "assumed not-captured";
4917 if (isKnownNoCaptureMaybeReturned())
4918 return "known not-captured-maybe-returned";
4919 if (isAssumedNoCaptureMaybeReturned())
4920 return "assumed not-captured-maybe-returned";
4921 return "assumed-captured";
4922 }
4923
4924 /// Check the use \p U and update \p State accordingly. Return true if we
4925 /// should continue to update the state.
4926 bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
4927 bool &Follow) {
4928 Instruction *UInst = cast<Instruction>(U.getUser());
4929 LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
4930 << *UInst << "\n");
4931
4932 // Deal with ptr2int by following uses.
4933 if (isa<PtrToIntInst>(UInst)) {
4934 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4935 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
4936 /* Return */ true);
4937 }
4938
4939 // For stores we already checked if we can follow them, if they make it
4940 // here we give up.
4941 if (isa<StoreInst>(UInst))
4942 return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
4943 /* Return */ false);
4944
4945 // Explicitly catch return instructions.
4946 if (isa<ReturnInst>(UInst)) {
4947 if (UInst->getFunction() == getAnchorScope())
4948 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
4949 /* Return */ true);
4950 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
4951 /* Return */ true);
4952 }
4953
4954 // For now we only use special logic for call sites. However, the tracker
4955 // itself knows about a lot of other non-capturing cases already.
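// An illustrative example (hypothetical IR): in
//   call void @g(i8* nocapture %p)
// the use of %p as a call argument does not capture it. The recursive
// AANoCapture query below also covers callees for which nocapture is only
// assumed so far rather than already manifested in the IR.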
4956 auto *CB = dyn_cast<CallBase>(UInst);
4957 if (!CB || !CB->isArgOperand(&U))
4958 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
4959 /* Return */ true);
4960
4961 unsigned ArgNo = CB->getArgOperandNo(&U);
4962 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4963 // If we have an abstract no-capture attribute for the argument we can use
4964 // it to justify a non-capture attribute here. This allows recursion!
4965 auto &ArgNoCaptureAA =
4966 A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
4967 if (ArgNoCaptureAA.isAssumedNoCapture())
4968 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
4969 /* Return */ false);
4970 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4971 Follow = true;
4972 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
4973 /* Return */ false);
4974 }
4975
4976 // Lastly, we could not find a reason to assume no-capture, so we do not.
4977 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
4978 /* Return */ true);
4979 }
4980
4981 /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
4982 /// \p CapturedInRet, then return true if we should continue updating the
4983 /// state.
4984 static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
4985 bool CapturedInInt, bool CapturedInRet) {
4986 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4987 << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4988 if (CapturedInMem)
4989 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4990 if (CapturedInInt)
4991 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4992 if (CapturedInRet)
4993 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4994 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4995 }
4996 };
4997
4998 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4999 const IRPosition &IRP = getIRPosition();
5000 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5001 : &IRP.getAssociatedValue();
5002 if (!V)
5003 return indicatePessimisticFixpoint();
5004
5005 const Function *F =
5006 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5007 assert(F && "Expected a function!");
5008 const IRPosition &FnPos = IRPosition::function(*F);
5009
5010 AANoCapture::StateType T;
5011
5012 // Readonly means we cannot capture through memory.
5013 bool IsKnown;
5014 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5015 T.addKnownBits(NOT_CAPTURED_IN_MEM);
5016 if (IsKnown)
5017 addKnownBits(NOT_CAPTURED_IN_MEM);
5018 }
5019
5020 // Make sure all returned values are different from the underlying value.
5021 // TODO: we could do this in a more sophisticated way inside
5022 // AAReturnedValues, e.g., track all values that escape through returns
5023 // directly somehow.
5024 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5025 bool SeenConstant = false;
5026 for (auto &It : RVAA.returned_values()) {
5027 if (isa<Constant>(It.first)) {
5028 if (SeenConstant)
5029 return false;
5030 SeenConstant = true;
5031 } else if (!isa<Argument>(It.first) ||
5032 It.first == getAssociatedArgument())
5033 return false;
5034 }
5035 return true;
5036 };
5037
5038 const auto &NoUnwindAA =
5039 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5040 if (NoUnwindAA.isAssumedNoUnwind()) {
5041 bool IsVoidTy = F->getReturnType()->isVoidTy();
5042 const AAReturnedValues *RVAA =
5043 IsVoidTy ?
nullptr
5044 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5045 DepClassTy::OPTIONAL);
5046
5047 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5048 T.addKnownBits(NOT_CAPTURED_IN_RET);
5049 if (T.isKnown(NOT_CAPTURED_IN_MEM))
5050 return ChangeStatus::UNCHANGED;
5051 if (NoUnwindAA.isKnownNoUnwind() &&
5052 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5053 addKnownBits(NOT_CAPTURED_IN_RET);
5054 if (isKnown(NOT_CAPTURED_IN_MEM))
5055 return indicateOptimisticFixpoint();
5056 }
5057 }
5058 }
5059
5060 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5061 const auto &DerefAA = A.getAAFor<AADereferenceable>(
5062 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5063 return DerefAA.getAssumedDereferenceableBytes();
5064 };
5065
5066 auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5067 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5068 case UseCaptureKind::NO_CAPTURE:
5069 return true;
5070 case UseCaptureKind::MAY_CAPTURE:
5071 return checkUse(A, T, U, Follow);
5072 case UseCaptureKind::PASSTHROUGH:
5073 Follow = true;
5074 return true;
5075 }
5076 llvm_unreachable("Unexpected use capture kind!");
5077 };
5078
5079 if (!A.checkForAllUses(UseCheck, *this, *V))
5080 return indicatePessimisticFixpoint();
5081
5082 AANoCapture::StateType &S = getState();
5083 auto Assumed = S.getAssumed();
5084 S.intersectAssumedBits(T.getAssumed());
5085 if (!isAssumedNoCaptureMaybeReturned())
5086 return indicatePessimisticFixpoint();
5087 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5088 : ChangeStatus::CHANGED;
5089 }
5090
5091 /// NoCapture attribute for function arguments.
5092 struct AANoCaptureArgument final : AANoCaptureImpl {
5093 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5094 : AANoCaptureImpl(IRP, A) {}
5095
5096 /// See AbstractAttribute::trackStatistics()
5097 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5098 };
5099
5100 /// NoCapture attribute for call site arguments.
5101 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5102 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5103 : AANoCaptureImpl(IRP, A) {}
5104
5105 /// See AbstractAttribute::initialize(...).
5106 void initialize(Attributor &A) override {
5107 if (Argument *Arg = getAssociatedArgument())
5108 if (Arg->hasByValAttr())
5109 indicateOptimisticFixpoint();
5110 AANoCaptureImpl::initialize(A);
5111 }
5112
5113 /// See AbstractAttribute::updateImpl(...).
5114 ChangeStatus updateImpl(Attributor &A) override {
5115 // TODO: Once we have call site specific value information we can provide
5116 // call site specific liveness information and then it makes
5117 // sense to specialize attributes for call site arguments instead of
5118 // redirecting requests to the callee argument.
5119 Argument *Arg = getAssociatedArgument();
5120 if (!Arg)
5121 return indicatePessimisticFixpoint();
5122 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5123 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5124 return clampStateAndIndicateChange(getState(), ArgAA.getState());
5125 }
5126
5127 /// See AbstractAttribute::trackStatistics()
5128 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5129 };
5130
5131 /// NoCapture attribute for floating values.
5132 struct AANoCaptureFloating final : AANoCaptureImpl {
5133 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5134 : AANoCaptureImpl(IRP, A) {}
5135
5136 /// See AbstractAttribute::trackStatistics()
5137 void trackStatistics() const override {
5138 STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5139 }
5140 };
5141
5142 /// NoCapture attribute for function return value.
5143 struct AANoCaptureReturned final : AANoCaptureImpl {
5144 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5145 : AANoCaptureImpl(IRP, A) {
5146 llvm_unreachable("NoCapture is not applicable to function returns!");
5147 }
5148
5149 /// See AbstractAttribute::initialize(...).
5150 void initialize(Attributor &A) override {
5151 llvm_unreachable("NoCapture is not applicable to function returns!");
5152 }
5153
5154 /// See AbstractAttribute::updateImpl(...).
5155 ChangeStatus updateImpl(Attributor &A) override {
5156 llvm_unreachable("NoCapture is not applicable to function returns!");
5157 }
5158
5159 /// See AbstractAttribute::trackStatistics()
5160 void trackStatistics() const override {}
5161 };
5162
5163 /// NoCapture attribute deduction for a call site return value.
5164 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5165 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5166 : AANoCaptureImpl(IRP, A) {}
5167
5168 /// See AbstractAttribute::initialize(...).
5169 void initialize(Attributor &A) override {
5170 const Function *F = getAnchorScope();
5171 // Check what state the associated function can actually capture.
5172 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5173 }
5174
5175 /// See AbstractAttribute::trackStatistics()
5176 void trackStatistics() const override {
5177 STATS_DECLTRACK_CSRET_ATTR(nocapture)
5178 }
5179 };
5180 } // namespace
5181
5182 /// ------------------ Value Simplify Attribute ----------------------------
5183
5184 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5185 // FIXME: Add typecast support.
5186 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5187 SimplifiedAssociatedValue, Other, Ty);
5188 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5189 return false;
5190
5191 LLVM_DEBUG({
5192 if (SimplifiedAssociatedValue.hasValue())
5193 dbgs() << "[ValueSimplify] is assumed to be "
5194 << **SimplifiedAssociatedValue << "\n";
5195 else
5196 dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5197 });
5198 return true;
5199 }
5200
5201 namespace {
5202 struct AAValueSimplifyImpl : AAValueSimplify {
5203 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5204 : AAValueSimplify(IRP, A) {}
5205
5206 /// See AbstractAttribute::initialize(...).
5207 void initialize(Attributor &A) override {
5208 if (getAssociatedValue().getType()->isVoidTy())
5209 indicatePessimisticFixpoint();
5210 if (A.hasSimplificationCallback(getIRPosition()))
5211 indicatePessimisticFixpoint();
5212 }
5213
5214 /// See AbstractAttribute::getAsStr().
5215 const std::string getAsStr() const override {
5216 LLVM_DEBUG({
5217 errs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5218 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5219 errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5220 });
5221 return isValidState() ? (isAtFixpoint() ?
"simplified" : "maybe-simple") 5222 : "not-simple"; 5223 } 5224 5225 /// See AbstractAttribute::trackStatistics() 5226 void trackStatistics() const override {} 5227 5228 /// See AAValueSimplify::getAssumedSimplifiedValue() 5229 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 5230 return SimplifiedAssociatedValue; 5231 } 5232 5233 /// Return a value we can use as replacement for the associated one, or 5234 /// nullptr if we don't have one that makes sense. 5235 Value *getReplacementValue(Attributor &A) const { 5236 Value *NewV; 5237 NewV = SimplifiedAssociatedValue.hasValue() 5238 ? SimplifiedAssociatedValue.getValue() 5239 : UndefValue::get(getAssociatedType()); 5240 if (!NewV) 5241 return nullptr; 5242 NewV = AA::getWithType(*NewV, *getAssociatedType()); 5243 if (!NewV || NewV == &getAssociatedValue()) 5244 return nullptr; 5245 const Instruction *CtxI = getCtxI(); 5246 if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache())) 5247 return nullptr; 5248 if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope())) 5249 return nullptr; 5250 return NewV; 5251 } 5252 5253 /// Helper function for querying AAValueSimplify and updating candicate. 5254 /// \param IRP The value position we are trying to unify with SimplifiedValue 5255 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 5256 const IRPosition &IRP, bool Simplify = true) { 5257 bool UsedAssumedInformation = false; 5258 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue(); 5259 if (Simplify) 5260 QueryingValueSimplified = 5261 A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation); 5262 return unionAssumed(QueryingValueSimplified); 5263 } 5264 5265 /// Returns a candidate is found or not 5266 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { 5267 if (!getAssociatedValue().getType()->isIntegerTy()) 5268 return false; 5269 5270 // This will also pass the call base context. 5271 const auto &AA = 5272 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE); 5273 5274 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A); 5275 5276 if (!COpt.hasValue()) { 5277 SimplifiedAssociatedValue = llvm::None; 5278 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5279 return true; 5280 } 5281 if (auto *C = COpt.getValue()) { 5282 SimplifiedAssociatedValue = C; 5283 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5284 return true; 5285 } 5286 return false; 5287 } 5288 5289 bool askSimplifiedValueForOtherAAs(Attributor &A) { 5290 if (askSimplifiedValueFor<AAValueConstantRange>(A)) 5291 return true; 5292 if (askSimplifiedValueFor<AAPotentialValues>(A)) 5293 return true; 5294 return false; 5295 } 5296 5297 /// See AbstractAttribute::manifest(...). 5298 ChangeStatus manifest(Attributor &A) override { 5299 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5300 if (getAssociatedValue().user_empty()) 5301 return Changed; 5302 5303 if (auto *NewV = getReplacementValue(A)) { 5304 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> " 5305 << *NewV << " :: " << *this << "\n"); 5306 if (A.changeValueAfterManifest(getAssociatedValue(), *NewV)) 5307 Changed = ChangeStatus::CHANGED; 5308 } 5309 5310 return Changed | AAValueSimplify::manifest(A); 5311 } 5312 5313 /// See AbstractState::indicatePessimisticFixpoint(...). 
5314 ChangeStatus indicatePessimisticFixpoint() override {
5315 SimplifiedAssociatedValue = &getAssociatedValue();
5316 return AAValueSimplify::indicatePessimisticFixpoint();
5317 }
5318
5319 static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5320 LoadInst &L, function_ref<bool(Value &)> Union) {
5321 auto UnionWrapper = [&](Value &V, Value &Obj) {
5322 if (isa<AllocaInst>(Obj))
5323 return Union(V);
5324 if (!AA::isDynamicallyUnique(A, AA, V))
5325 return false;
5326 if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5327 return false;
5328 return Union(V);
5329 };
5330
5331 Value &Ptr = *L.getPointerOperand();
5332 SmallVector<Value *, 8> Objects;
5333 bool UsedAssumedInformation = false;
5334 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L,
5335 UsedAssumedInformation))
5336 return false;
5337
5338 const auto *TLI =
5339 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
5340 for (Value *Obj : Objects) {
5341 LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5342 if (isa<UndefValue>(Obj))
5343 continue;
5344 if (isa<ConstantPointerNull>(Obj)) {
5345 // A null pointer access can be undefined but any offset from null may
5346 // be OK. We do not try to optimize the latter.
5347 if (!NullPointerIsDefined(L.getFunction(),
5348 Ptr.getType()->getPointerAddressSpace()) &&
5349 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5350 continue;
5351 return false;
5352 }
5353 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
5354 if (!InitialVal || !Union(*InitialVal))
5355 return false;
5356
5357 LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5358 "propagation, checking accesses next.\n");
5359
5360 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5361 LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5362 if (Acc.isWrittenValueYetUndetermined())
5363 return true;
5364 Value *Content = Acc.getWrittenValue();
5365 if (!Content)
5366 return false;
5367 Value *CastedContent =
5368 AA::getWithType(*Content, *AA.getAssociatedType());
5369 if (!CastedContent)
5370 return false;
5371 if (IsExact)
5372 return UnionWrapper(*CastedContent, *Obj);
5373 if (auto *C = dyn_cast<Constant>(CastedContent))
5374 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5375 return UnionWrapper(*CastedContent, *Obj);
5376 return false;
5377 };
5378
5379 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5380 DepClassTy::REQUIRED);
5381 if (!PI.forallInterferingAccesses(A, AA, L, CheckAccess))
5382 return false;
5383 }
5384 return true;
5385 }
5386 };
5387
5388 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5389 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5390 : AAValueSimplifyImpl(IRP, A) {}
5391
5392 void initialize(Attributor &A) override {
5393 AAValueSimplifyImpl::initialize(A);
5394 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5395 indicatePessimisticFixpoint();
5396 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5397 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5398 /* IgnoreSubsumingPositions */ true))
5399 indicatePessimisticFixpoint();
5400 }
5401
5402 /// See AbstractAttribute::updateImpl(...).
5403 ChangeStatus updateImpl(Attributor &A) override {
5404 // Byval is only replaceable if it is readonly, otherwise we would write
5405 // into the replaced value and not the copy that byval creates implicitly.
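// An illustrative example (hypothetical IR): for
//   define internal void @f(i32* byval(i32) %arg)
// every call site passes a fresh copy of the pointee. If @f wrote through
// %arg, propagating a caller value into %arg would target the original
// object instead of the implicit copy, so only readonly byval is handled.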
5406 Argument *Arg = getAssociatedArgument();
5407 if (Arg->hasByValAttr()) {
5408 // TODO: We probably need to verify synchronization is not an issue, e.g.,
5409 // there is no race by not copying a constant byval.
5410 bool IsKnown;
5411 if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5412 return indicatePessimisticFixpoint();
5413 }
5414
5415 auto Before = SimplifiedAssociatedValue;
5416
5417 auto PredForCallSite = [&](AbstractCallSite ACS) {
5418 const IRPosition &ACSArgPos =
5419 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5420 // Check if a corresponding argument was found or if it is not
5421 // associated (which can happen for callback calls).
5422 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5423 return false;
5424
5425 // Simplify the argument operand explicitly and check if the result is
5426 // valid in the current scope. This avoids referring to simplified values
5427 // in other functions, e.g., we don't want to say an argument in a
5428 // static function is actually an argument in a different function.
5429 bool UsedAssumedInformation = false;
5430 Optional<Constant *> SimpleArgOp =
5431 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5432 if (!SimpleArgOp.hasValue())
5433 return true;
5434 if (!SimpleArgOp.getValue())
5435 return false;
5436 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5437 return false;
5438 return unionAssumed(*SimpleArgOp);
5439 };
5440
5441 // Generate an answer specific to a call site context.
5442 bool Success;
5443 bool UsedAssumedInformation = false;
5444 if (hasCallBaseContext() &&
5445 getCallBaseContext()->getCalledFunction() == Arg->getParent())
5446 Success = PredForCallSite(
5447 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5448 else
5449 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5450 UsedAssumedInformation);
5451
5452 if (!Success)
5453 if (!askSimplifiedValueForOtherAAs(A))
5454 return indicatePessimisticFixpoint();
5455
5456 // If a candidate was found in this update, return CHANGED.
5457 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5458 : ChangeStatus ::CHANGED;
5459 }
5460
5461 /// See AbstractAttribute::trackStatistics()
5462 void trackStatistics() const override {
5463 STATS_DECLTRACK_ARG_ATTR(value_simplify)
5464 }
5465 };
5466
5467 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5468 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5469 : AAValueSimplifyImpl(IRP, A) {}
5470
5471 /// See AAValueSimplify::getAssumedSimplifiedValue()
5472 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5473 if (!isValidState())
5474 return nullptr;
5475 return SimplifiedAssociatedValue;
5476 }
5477
5478 /// See AbstractAttribute::updateImpl(...).
5479 ChangeStatus updateImpl(Attributor &A) override {
5480 auto Before = SimplifiedAssociatedValue;
5481
5482 auto ReturnInstCB = [&](Instruction &I) {
5483 auto &RI = cast<ReturnInst>(I);
5484 return checkAndUpdate(
5485 A, *this,
5486 IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5487 };
5488
5489 bool UsedAssumedInformation = false;
5490 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5491 UsedAssumedInformation))
5492 if (!askSimplifiedValueForOtherAAs(A))
5493 return indicatePessimisticFixpoint();
5494
5495 // If a candidate was found in this update, return CHANGED.
5496 return Before == SimplifiedAssociatedValue ?
ChangeStatus::UNCHANGED
5497 : ChangeStatus ::CHANGED;
5498 }
5499
5500 ChangeStatus manifest(Attributor &A) override {
5501 // We queried AAValueSimplify for the returned values so they will be
5502 // replaced if a simplified form was found. Nothing to do here.
5503 return ChangeStatus::UNCHANGED;
5504 }
5505
5506 /// See AbstractAttribute::trackStatistics()
5507 void trackStatistics() const override {
5508 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5509 }
5510 };
5511
5512 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5513 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5514 : AAValueSimplifyImpl(IRP, A) {}
5515
5516 /// See AbstractAttribute::initialize(...).
5517 void initialize(Attributor &A) override {
5518 AAValueSimplifyImpl::initialize(A);
5519 Value &V = getAnchorValue();
5520
5521 // TODO: Add other cases besides constants.
5522 if (isa<Constant>(V))
5523 indicatePessimisticFixpoint();
5524 }
5525
5526 /// Check if \p Cmp is a comparison we can simplify.
5527 ///
5528 /// We handle multiple cases, one in which at least one operand is an
5529 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5530 /// operand. Return true if successful; in that case SimplifiedAssociatedValue
5531 /// will be updated.
5532 bool handleCmp(Attributor &A, CmpInst &Cmp) {
5533 auto Union = [&](Value &V) {
5534 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5535 SimplifiedAssociatedValue, &V, V.getType());
5536 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5537 };
5538
5539 Value *LHS = Cmp.getOperand(0);
5540 Value *RHS = Cmp.getOperand(1);
5541
5542 // Simplify the operands first.
5543 bool UsedAssumedInformation = false;
5544 const auto &SimplifiedLHS =
5545 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5546 *this, UsedAssumedInformation);
5547 if (!SimplifiedLHS.hasValue())
5548 return true;
5549 if (!SimplifiedLHS.getValue())
5550 return false;
5551 LHS = *SimplifiedLHS;
5552
5553 const auto &SimplifiedRHS =
5554 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5555 *this, UsedAssumedInformation);
5556 if (!SimplifiedRHS.hasValue())
5557 return true;
5558 if (!SimplifiedRHS.getValue())
5559 return false;
5560 RHS = *SimplifiedRHS;
5561
5562 LLVMContext &Ctx = Cmp.getContext();
5563 // Handle the trivial case first in which we don't even need to think about
5564 // null or non-null.
5565 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5566 Constant *NewVal =
5567 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5568 if (!Union(*NewVal))
5569 return false;
5570 if (!UsedAssumedInformation)
5571 indicateOptimisticFixpoint();
5572 return true;
5573 }
5574
5575 // From now on we only handle equalities (==, !=).
5576 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5577 if (!ICmp || !ICmp->isEquality())
5578 return false;
5579
5580 bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5581 bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5582 if (!LHSIsNull && !RHSIsNull)
5583 return false;
5584
5585 // We are left with the nullptr ==/!= non-nullptr case. We'll use AANonNull
5586 // on the non-nullptr operand and if we assume it's non-null we can conclude
5587 // the result of the comparison.
5588 assert((LHSIsNull || RHSIsNull) &&
5589 "Expected nullptr versus non-nullptr comparison at this point");
5590
5591 // The index is the operand that we assume is not null.
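// LHSIsNull converts to 0 or 1: if the LHS is the null constant this picks
// operand 1 (the RHS), otherwise operand 0 (the LHS).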
5592 unsigned PtrIdx = LHSIsNull;
5593 auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5594 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5595 DepClassTy::REQUIRED);
5596 if (!PtrNonNullAA.isAssumedNonNull())
5597 return false;
5598 UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5599
5600 // The new value depends on the predicate, true for != and false for ==.
5601 Constant *NewVal = ConstantInt::get(
5602 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5603 if (!Union(*NewVal))
5604 return false;
5605
5606 if (!UsedAssumedInformation)
5607 indicateOptimisticFixpoint();
5608
5609 return true;
5610 }
5611
5612 bool updateWithLoad(Attributor &A, LoadInst &L) {
5613 auto Union = [&](Value &V) {
5614 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5615 SimplifiedAssociatedValue, &V, L.getType());
5616 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5617 };
5618 return handleLoad(A, *this, L, Union);
5619 }
5620
5621 /// Use the generic, non-optimistic InstSimplify functionality if we managed
5622 /// to simplify any operand of the instruction \p I. Return true if
5623 /// successful; in that case SimplifiedAssociatedValue will be updated.
5624 bool handleGenericInst(Attributor &A, Instruction &I) {
5625 bool SomeSimplified = false;
5626 bool UsedAssumedInformation = false;
5627
5628 SmallVector<Value *, 8> NewOps(I.getNumOperands());
5629 int Idx = 0;
5630 for (Value *Op : I.operands()) {
5631 const auto &SimplifiedOp =
5632 A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5633 *this, UsedAssumedInformation);
5634 // If we are not sure about any operand, we are not sure about the entire
5635 // instruction, so we'll wait.
5636 if (!SimplifiedOp.hasValue())
5637 return true;
5638
5639 if (SimplifiedOp.getValue())
5640 NewOps[Idx] = SimplifiedOp.getValue();
5641 else
5642 NewOps[Idx] = Op;
5643
5644 SomeSimplified |= (NewOps[Idx] != Op);
5645 ++Idx;
5646 }
5647
5648 // We won't bother with the InstSimplify interface if we didn't simplify
5649 // any operand ourselves.
5650 if (!SomeSimplified)
5651 return false;
5652
5653 InformationCache &InfoCache = A.getInfoCache();
5654 Function *F = I.getFunction();
5655 const auto *DT =
5656 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5657 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5658 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5659 OptimizationRemarkEmitter *ORE = nullptr;
5660
5661 const DataLayout &DL = I.getModule()->getDataLayout();
5662 SimplifyQuery Q(DL, TLI, DT, AC, &I);
5663 if (Value *SimplifiedI =
5664 SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5665 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5666 SimplifiedAssociatedValue, SimplifiedI, I.getType());
5667 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5668 }
5669 return false;
5670 }
5671
5672 /// See AbstractAttribute::updateImpl(...).
5673 ChangeStatus updateImpl(Attributor &A) override {
5674 auto Before = SimplifiedAssociatedValue;
5675
5676 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5677 bool Stripped) -> bool {
5678 auto &AA = A.getAAFor<AAValueSimplify>(
5679 *this, IRPosition::value(V, getCallBaseContext()),
5680 DepClassTy::REQUIRED);
5681 if (!Stripped && this == &AA) {
5682
5683 if (auto *I = dyn_cast<Instruction>(&V)) {
5684 if (auto *LI = dyn_cast<LoadInst>(&V))
5685 if (updateWithLoad(A, *LI))
5686 return true;
5687 if (auto *Cmp = dyn_cast<CmpInst>(&V))
5688 if (handleCmp(A, *Cmp))
5689 return true;
5690 if (handleGenericInst(A, *I))
5691 return true;
5692 }
5693 // TODO: Look at the instruction and check recursively.
5694
5695 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5696 << "\n");
5697 return false;
5698 }
5699 return checkAndUpdate(A, *this,
5700 IRPosition::value(V, getCallBaseContext()));
5701 };
5702
5703 bool Dummy = false;
5704 bool UsedAssumedInformation = false;
5705 if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5706 VisitValueCB, getCtxI(),
5707 UsedAssumedInformation,
5708 /* UseValueSimplify */ false))
5709 if (!askSimplifiedValueForOtherAAs(A))
5710 return indicatePessimisticFixpoint();
5711
5712 // If a candidate was found in this update, return CHANGED.
5713 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5714 : ChangeStatus ::CHANGED;
5715 }
5716
5717 /// See AbstractAttribute::trackStatistics()
5718 void trackStatistics() const override {
5719 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5720 }
5721 };
5722
5723 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5724 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5725 : AAValueSimplifyImpl(IRP, A) {}
5726
5727 /// See AbstractAttribute::initialize(...).
5728 void initialize(Attributor &A) override {
5729 SimplifiedAssociatedValue = nullptr;
5730 indicateOptimisticFixpoint();
5731 }
5732 /// See AbstractAttribute::updateImpl(...).
5733 ChangeStatus updateImpl(Attributor &A) override {
5734 llvm_unreachable(
5735 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5736 }
5737 /// See AbstractAttribute::trackStatistics()
5738 void trackStatistics() const override {
5739 STATS_DECLTRACK_FN_ATTR(value_simplify)
5740 }
5741 };
5742
5743 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5744 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5745 : AAValueSimplifyFunction(IRP, A) {}
5746 /// See AbstractAttribute::trackStatistics()
5747 void trackStatistics() const override {
5748 STATS_DECLTRACK_CS_ATTR(value_simplify)
5749 }
5750 };
5751
5752 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5753 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5754 : AAValueSimplifyImpl(IRP, A) {}
5755
5756 void initialize(Attributor &A) override {
5757 AAValueSimplifyImpl::initialize(A);
5758 Function *Fn = getAssociatedFunction();
5759 if (!Fn) {
5760 indicatePessimisticFixpoint();
5761 return;
5762 }
5763 for (Argument &Arg : Fn->args()) {
5764 if (Arg.hasReturnedAttr()) {
5765 auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5766 Arg.getArgNo());
5767 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5768 checkAndUpdate(A, *this, IRP))
5769 indicateOptimisticFixpoint();
5770 else
5771 indicatePessimisticFixpoint();
5772 return;
5773 }
5774 }
5775 }
5776
5777 /// See AbstractAttribute::updateImpl(...).
5778 ChangeStatus updateImpl(Attributor &A) override { 5779 auto Before = SimplifiedAssociatedValue; 5780 auto &RetAA = A.getAAFor<AAReturnedValues>( 5781 *this, IRPosition::function(*getAssociatedFunction()), 5782 DepClassTy::REQUIRED); 5783 auto PredForReturned = 5784 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 5785 bool UsedAssumedInformation = false; 5786 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent( 5787 &RetVal, *cast<CallBase>(getCtxI()), *this, 5788 UsedAssumedInformation); 5789 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5790 SimplifiedAssociatedValue, CSRetVal, getAssociatedType()); 5791 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5792 }; 5793 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned)) 5794 if (!askSimplifiedValueForOtherAAs(A)) 5795 return indicatePessimisticFixpoint(); 5796 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED 5797 : ChangeStatus ::CHANGED; 5798 } 5799 5800 void trackStatistics() const override { 5801 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 5802 } 5803 }; 5804 5805 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 5806 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 5807 : AAValueSimplifyFloating(IRP, A) {} 5808 5809 /// See AbstractAttribute::manifest(...). 5810 ChangeStatus manifest(Attributor &A) override { 5811 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5812 5813 if (auto *NewV = getReplacementValue(A)) { 5814 Use &U = cast<CallBase>(&getAnchorValue()) 5815 ->getArgOperandUse(getCallSiteArgNo()); 5816 if (A.changeUseAfterManifest(U, *NewV)) 5817 Changed = ChangeStatus::CHANGED; 5818 } 5819 5820 return Changed | AAValueSimplify::manifest(A); 5821 } 5822 5823 void trackStatistics() const override { 5824 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 5825 } 5826 }; 5827 } // namespace 5828 5829 /// ----------------------- Heap-To-Stack Conversion --------------------------- 5830 namespace { 5831 struct AAHeapToStackFunction final : public AAHeapToStack { 5832 5833 struct AllocationInfo { 5834 /// The call that allocates the memory. 5835 CallBase *const CB; 5836 5837 /// The library function id for the allocation. 5838 LibFunc LibraryFunctionId = NotLibFunc; 5839 5840 /// The status wrt. a rewrite. 5841 enum { 5842 STACK_DUE_TO_USE, 5843 STACK_DUE_TO_FREE, 5844 INVALID, 5845 } Status = STACK_DUE_TO_USE; 5846 5847 /// Flag to indicate if we encountered a use that might free this allocation 5848 /// but which is not in the deallocation infos. 5849 bool HasPotentiallyFreeingUnknownUses = false; 5850 5851 /// The set of free calls that use this allocation. 5852 SmallSetVector<CallBase *, 1> PotentialFreeCalls{}; 5853 }; 5854 5855 struct DeallocationInfo { 5856 /// The call that deallocates the memory. 5857 CallBase *const CB; 5858 5859 /// Flag to indicate if we don't know all objects this deallocation might 5860 /// free. 5861 bool MightFreeUnknownObjects = false; 5862 5863 /// The set of allocation calls that are potentially freed. 5864 SmallSetVector<CallBase *, 1> PotentialAllocationCalls{}; 5865 }; 5866 5867 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5868 : AAHeapToStack(IRP, A) {} 5869 5870 ~AAHeapToStackFunction() { 5871 // Ensure we call the destructor so we release any memory allocated in the 5872 // sets. 
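// (The info objects are placement-new'ed into the Attributor's bump
// allocator in initialize(), and a bump allocator reclaims its memory in
// bulk without running destructors, hence the explicit calls below.)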
5873 for (auto &It : AllocationInfos) 5874 It.second->~AllocationInfo(); 5875 for (auto &It : DeallocationInfos) 5876 It.second->~DeallocationInfo(); 5877 } 5878 5879 void initialize(Attributor &A) override { 5880 AAHeapToStack::initialize(A); 5881 5882 const Function *F = getAnchorScope(); 5883 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5884 5885 auto AllocationIdentifierCB = [&](Instruction &I) { 5886 CallBase *CB = dyn_cast<CallBase>(&I); 5887 if (!CB) 5888 return true; 5889 if (isFreeCall(CB, TLI)) { 5890 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 5891 return true; 5892 } 5893 // To do heap to stack, we need to know that the allocation itself is 5894 // removable once uses are rewritten, and that we can initialize the 5895 // alloca to the same pattern as the original allocation result. 5896 if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) { 5897 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext()); 5898 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) { 5899 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB}; 5900 AllocationInfos[CB] = AI; 5901 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 5902 } 5903 } 5904 return true; 5905 }; 5906 5907 bool UsedAssumedInformation = false; 5908 bool Success = A.checkForAllCallLikeInstructions( 5909 AllocationIdentifierCB, *this, UsedAssumedInformation, 5910 /* CheckBBLivenessOnly */ false, 5911 /* CheckPotentiallyDead */ true); 5912 (void)Success; 5913 assert(Success && "Did not expect the call base visit callback to fail!"); 5914 5915 Attributor::SimplifictionCallbackTy SCB = 5916 [](const IRPosition &, const AbstractAttribute *, 5917 bool &) -> Optional<Value *> { return nullptr; }; 5918 for (const auto &It : AllocationInfos) 5919 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 5920 SCB); 5921 for (const auto &It : DeallocationInfos) 5922 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 5923 SCB); 5924 } 5925 5926 const std::string getAsStr() const override { 5927 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 5928 for (const auto &It : AllocationInfos) { 5929 if (It.second->Status == AllocationInfo::INVALID) 5930 ++NumInvalidMallocs; 5931 else 5932 ++NumH2SMallocs; 5933 } 5934 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 5935 std::to_string(NumInvalidMallocs); 5936 } 5937 5938 /// See AbstractAttribute::trackStatistics(). 
5939 void trackStatistics() const override { 5940 STATS_DECL( 5941 MallocCalls, Function, 5942 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5943 for (auto &It : AllocationInfos) 5944 if (It.second->Status != AllocationInfo::INVALID) 5945 ++BUILD_STAT_NAME(MallocCalls, Function); 5946 } 5947 5948 bool isAssumedHeapToStack(const CallBase &CB) const override { 5949 if (isValidState()) 5950 if (AllocationInfo *AI = 5951 AllocationInfos.lookup(const_cast<CallBase *>(&CB))) 5952 return AI->Status != AllocationInfo::INVALID; 5953 return false; 5954 } 5955 5956 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 5957 if (!isValidState()) 5958 return false; 5959 5960 for (auto &It : AllocationInfos) { 5961 AllocationInfo &AI = *It.second; 5962 if (AI.Status == AllocationInfo::INVALID) 5963 continue; 5964 5965 if (AI.PotentialFreeCalls.count(&CB)) 5966 return true; 5967 } 5968 5969 return false; 5970 } 5971 5972 ChangeStatus manifest(Attributor &A) override { 5973 assert(getState().isValidState() && 5974 "Attempted to manifest an invalid state!"); 5975 5976 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 5977 Function *F = getAnchorScope(); 5978 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5979 5980 for (auto &It : AllocationInfos) { 5981 AllocationInfo &AI = *It.second; 5982 if (AI.Status == AllocationInfo::INVALID) 5983 continue; 5984 5985 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 5986 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 5987 A.deleteAfterManifest(*FreeCall); 5988 HasChanged = ChangeStatus::CHANGED; 5989 } 5990 5991 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 5992 << "\n"); 5993 5994 auto Remark = [&](OptimizationRemark OR) { 5995 LibFunc IsAllocShared; 5996 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 5997 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 5998 return OR << "Moving globalized variable to the stack."; 5999 return OR << "Moving memory allocation from the heap to the stack."; 6000 }; 6001 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6002 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 6003 else 6004 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 6005 6006 const DataLayout &DL = A.getInfoCache().getDL(); 6007 Value *Size; 6008 Optional<APInt> SizeAPI = getSize(A, *this, AI); 6009 if (SizeAPI.hasValue()) { 6010 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 6011 } else { 6012 LLVMContext &Ctx = AI.CB->getContext(); 6013 ObjectSizeOpts Opts; 6014 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); 6015 SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB); 6016 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && 6017 cast<ConstantInt>(SizeOffsetPair.second)->isZero()); 6018 Size = SizeOffsetPair.first; 6019 } 6020 6021 Align Alignment(1); 6022 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 6023 Alignment = max(Alignment, RetAlign); 6024 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6025 Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); 6026 assert(AlignmentAPI.hasValue() && 6027 "Expected an alignment during manifest!"); 6028 Alignment = 6029 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue())); 6030 } 6031 6032 // TODO: Hoist the alloca towards the function entry. 
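// (Allocas outside the entry block are typically treated as dynamic stack
// allocations; hoisting the new alloca would let later passes consider it
// static, which is the motivation for the TODO above.)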
6033 unsigned AS = DL.getAllocaAddrSpace(); 6034 Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS, 6035 Size, Alignment, "", AI.CB); 6036 6037 if (Alloca->getType() != AI.CB->getType()) 6038 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6039 Alloca, AI.CB->getType(), "malloc_cast", AI.CB); 6040 6041 auto *I8Ty = Type::getInt8Ty(F->getContext()); 6042 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty); 6043 assert(InitVal && 6044 "Must be able to materialize initial memory state of allocation"); 6045 6046 A.changeValueAfterManifest(*AI.CB, *Alloca); 6047 6048 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 6049 auto *NBB = II->getNormalDest(); 6050 BranchInst::Create(NBB, AI.CB->getParent()); 6051 A.deleteAfterManifest(*AI.CB); 6052 } else { 6053 A.deleteAfterManifest(*AI.CB); 6054 } 6055 6056 // Initialize the alloca with the same value as used by the allocation 6057 // function. We can skip undef as the initial value of an alloc is 6058 // undef, and the memset would simply end up being DSEd. 6059 if (!isa<UndefValue>(InitVal)) { 6060 IRBuilder<> Builder(Alloca->getNextNode()); 6061 // TODO: Use alignment above if align!=1 6062 Builder.CreateMemSet(Alloca, InitVal, Size, None); 6063 } 6064 HasChanged = ChangeStatus::CHANGED; 6065 } 6066 6067 return HasChanged; 6068 } 6069 6070 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6071 Value &V) { 6072 bool UsedAssumedInformation = false; 6073 Optional<Constant *> SimpleV = 6074 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6075 if (!SimpleV.hasValue()) 6076 return APInt(64, 0); 6077 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) 6078 return CI->getValue(); 6079 return llvm::None; 6080 } 6081 6082 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6083 AllocationInfo &AI) { 6084 auto Mapper = [&](const Value *V) -> const Value * { 6085 bool UsedAssumedInformation = false; 6086 if (Optional<Constant *> SimpleV = 6087 A.getAssumedConstant(*V, AA, UsedAssumedInformation)) 6088 if (*SimpleV) 6089 return *SimpleV; 6090 return V; 6091 }; 6092 6093 const Function *F = getAnchorScope(); 6094 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6095 return getAllocSize(AI.CB, TLI, Mapper); 6096 } 6097 6098 /// Collection of all malloc-like calls in a function with associated 6099 /// information. 6100 MapVector<CallBase *, AllocationInfo *> AllocationInfos; 6101 6102 /// Collection of all free-like calls in a function with associated 6103 /// information. 6104 MapVector<CallBase *, DeallocationInfo *> DeallocationInfos; 6105 6106 ChangeStatus updateImpl(Attributor &A) override; 6107 }; 6108 6109 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { 6110 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6111 const Function *F = getAnchorScope(); 6112 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6113 6114 const auto &LivenessAA = 6115 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); 6116 6117 MustBeExecutedContextExplorer &Explorer = 6118 A.getInfoCache().getMustBeExecutedContextExplorer(); 6119 6120 bool StackIsAccessibleByOtherThreads = 6121 A.getInfoCache().stackIsAccessibleByOtherThreads(); 6122 6123 // Flag to ensure we update our deallocation information at most once per 6124 // updateImpl call and only if we use the free check reasoning. 
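// (UpdateFrees below is invoked lazily from FreeCheck, so allocations that
// are valid purely due to their uses never pay for the deallocation
// analysis.)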
6125 bool HasUpdatedFrees = false;
6126
6127 auto UpdateFrees = [&]() {
6128 HasUpdatedFrees = true;
6129
6130 for (auto &It : DeallocationInfos) {
6131 DeallocationInfo &DI = *It.second;
6132 // For now we cannot use deallocations that have unknown inputs, skip
6133 // them.
6134 if (DI.MightFreeUnknownObjects)
6135 continue;
6136
6137 // No need to analyze dead calls, ignore them instead.
6138 bool UsedAssumedInformation = false;
6139 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6140 /* CheckBBLivenessOnly */ true))
6141 continue;
6142
6143 // Use the optimistic version to get the freed objects, ignoring dead
6144 // branches etc.
6145 SmallVector<Value *, 8> Objects;
6146 if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6147 *this, DI.CB,
6148 UsedAssumedInformation)) {
6149 LLVM_DEBUG(
6150 dbgs()
6151 << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6152 DI.MightFreeUnknownObjects = true;
6153 continue;
6154 }
6155
6156 // Check each object explicitly.
6157 for (auto *Obj : Objects) {
6158 // Free of null and undef can be ignored as no-ops (or UB in the latter
6159 // case).
6160 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6161 continue;
6162
6163 CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6164 if (!ObjCB) {
6165 LLVM_DEBUG(dbgs()
6166 << "[H2S] Free of a non-call object: " << *Obj << "\n");
6167 DI.MightFreeUnknownObjects = true;
6168 continue;
6169 }
6170
6171 AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6172 if (!AI) {
6173 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6174 << "\n");
6175 DI.MightFreeUnknownObjects = true;
6176 continue;
6177 }
6178
6179 DI.PotentialAllocationCalls.insert(ObjCB);
6180 }
6181 }
6182 };
6183
6184 auto FreeCheck = [&](AllocationInfo &AI) {
6185 // If the stack is not accessible by other threads, the "must-free" logic
6186 // doesn't apply as the pointer could be shared and needs to be placed in
6187 // "shareable" memory.
6188 if (!StackIsAccessibleByOtherThreads) {
6189 auto &NoSyncAA =
6190 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6191 if (!NoSyncAA.isAssumedNoSync()) {
6192 LLVM_DEBUG(
6193 dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6194 "other threads and function is not nosync:\n");
6195 return false;
6196 }
6197 }
6198 if (!HasUpdatedFrees)
6199 UpdateFrees();
6200
6201 // TODO: Allow multi-exit functions that have different free calls.
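// For example, a function that frees the allocation once on each of two
// return paths is currently rejected by the single-free-call check below,
// even though every path frees exactly once.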
    if (AI.PotentialFreeCalls.size() != 1) {
      LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
                        << AI.PotentialFreeCalls.size() << "\n");
      return false;
    }
    CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
    DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
    if (!DI) {
      LLVM_DEBUG(
          dbgs() << "[H2S] unique free call was not known as deallocation call "
                 << *UniqueFree << "\n");
      return false;
    }
    if (DI->MightFreeUnknownObjects) {
      LLVM_DEBUG(
          dbgs() << "[H2S] unique free call might free unknown allocations\n");
      return false;
    }
    if (DI->PotentialAllocationCalls.size() > 1) {
      LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
                        << DI->PotentialAllocationCalls.size()
                        << " different allocations\n");
      return false;
    }
    if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
      LLVM_DEBUG(
          dbgs()
          << "[H2S] unique free call not known to free this allocation but "
          << **DI->PotentialAllocationCalls.begin() << "\n");
      return false;
    }
    Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
    if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
      LLVM_DEBUG(
          dbgs()
          << "[H2S] unique free call might not be executed with the allocation "
          << *UniqueFree << "\n");
      return false;
    }
    return true;
  };

  auto UsesCheck = [&](AllocationInfo &AI) {
    bool ValidUsesOnly = true;

    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (isa<LoadInst>(UserI))
        return true;
      if (auto *SI = dyn_cast<StoreInst>(UserI)) {
        if (SI->getValueOperand() == U.get()) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] escaping store to memory: " << *UserI << "\n");
          ValidUsesOnly = false;
        } else {
          // A store into the malloc'ed memory is fine.
        }
        return true;
      }
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
          return true;
        if (DeallocationInfos.count(CB)) {
          AI.PotentialFreeCalls.insert(CB);
          return true;
        }

        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::OPTIONAL);

        // If a call site argument use is nofree, we are fine.
        const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::OPTIONAL);

        bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
        bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
        if (MaybeCaptured ||
            (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
             MaybeFreed)) {
          AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;

          // Emit a missed remark if this is missed OpenMP globalization.
          auto Remark = [&](OptimizationRemarkMissed ORM) {
            return ORM
                   << "Could not move globalized variable to the stack. "
                      "Variable is potentially captured in call. Mark "
                      "parameter as `__attribute__((noescape))` to override.";
          };

          if (ValidUsesOnly &&
              AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
            A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);

          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    if (!A.checkForAllUses(Pred, *this, *AI.CB))
      return false;
    return ValidUsesOnly;
  };

  // The actual update starts here. We look at all allocations and depending on
  // their status perform the appropriate check(s).
  for (auto &It : AllocationInfos) {
    AllocationInfo &AI = *It.second;
    if (AI.Status == AllocationInfo::INVALID)
      continue;

    if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
      Optional<APInt> APAlign = getAPInt(A, *this, *Align);
      if (!APAlign) {
        // Can't generate an alloca which respects the required alignment
        // on the allocation.
        LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
                          << "\n");
        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
      if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
          !APAlign->isPowerOf2()) {
        LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
                          << "\n");
        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
    }

    if (MaxHeapToStackSize != -1) {
      Optional<APInt> Size = getSize(A, *this, AI);
      if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
        LLVM_DEBUG({
          if (!Size.hasValue())
            dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
          else
            dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
                   << MaxHeapToStackSize << "\n";
        });

        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
    }

    switch (AI.Status) {
    case AllocationInfo::STACK_DUE_TO_USE:
      if (UsesCheck(AI))
        continue;
      AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
      LLVM_FALLTHROUGH;
    case AllocationInfo::STACK_DUE_TO_FREE:
      if (FreeCheck(AI))
        continue;
      AI.Status = AllocationInfo::INVALID;
      Changed = ChangeStatus::CHANGED;
      continue;
    case AllocationInfo::INVALID:
      llvm_unreachable("Invalid allocations should never reach this point!");
    };
  }

  return Changed;
}
} // namespace

/// ----------------------- Privatizable Pointers ------------------------------
namespace {
struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
  AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}

  ChangeStatus indicatePessimisticFixpoint() override {
    AAPrivatizablePtr::indicatePessimisticFixpoint();
    PrivatizableType = nullptr;
    return ChangeStatus::CHANGED;
  }

  /// Identify the type we can choose for a private copy of the underlying
  /// argument.
  /// None means it is not clear yet, nullptr means there is none.
  virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;

  /// Return a privatizable type that encloses both T0 and T1.
  /// TODO: This is merely a stub for now as we should manage a mapping as
  ///       well.
  Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
    if (!T0.hasValue())
      return T1;
    if (!T1.hasValue())
      return T0;
    if (T0 == T1)
      return T0;
    return nullptr;
  }

  Optional<Type *> getPrivatizableType() const override {
    return PrivatizableType;
  }

  const std::string getAsStr() const override {
    return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
  }

protected:
  Optional<Type *> PrivatizableType;
};

// TODO: Do this for call site arguments (probably also other values) as well.

struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    // If this is a byval argument and we know all the call sites (so we can
    // rewrite them), there is no need to check them explicitly.
    bool UsedAssumedInformation = false;
    SmallVector<Attribute, 1> Attrs;
    getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
    if (!Attrs.empty() &&
        A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
                               true, UsedAssumedInformation))
      return Attrs[0].getValueAsType();

    Optional<Type *> Ty;
    unsigned ArgNo = getIRPosition().getCallSiteArgNo();

    // Make sure the associated call site argument has the same type at all
    // call sites and that it is an allocation we know is safe to privatize;
    // for now that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow up patch.
    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Check that all call sites agree on a type.
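      // Illustrative sketch (hypothetical IR): if one call site passes a
      // pointer rooted in `alloca i32` and another one rooted in
      // `alloca double`, the combineTypes call below folds the two candidate
      // types to nullptr and privatization is abandoned:
      //
      //   call void @f(i32* %a)    ; CSTy: i32
      //   call void @f(double* %b) ; CSTy: double -> combined: nullptr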
      auto &PrivCSArgAA =
          A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
      Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
        if (CSTy.hasValue() && CSTy.getValue())
          CSTy.getValue()->print(dbgs());
        else if (CSTy.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
      });

      Ty = combineTypes(Ty, CSTy);

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
        if (Ty.hasValue() && Ty.getValue())
          Ty.getValue()->print(dbgs());
        else if (Ty.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
        dbgs() << "\n";
      });

      return !Ty.hasValue() || Ty.getValue();
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
                                UsedAssumedInformation))
      return nullptr;
    return Ty;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // The dependence is optional so we don't give up on the privatization once
    // we give up on the alignment.
    A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
                        DepClassTy::OPTIONAL);

    // Avoid arguments with padding for now.
    if (!getIRPosition().hasAttr(Attribute::ByVal) &&
        !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
                                                A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the
    // function signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    Function &Fn = *getIRPosition().getAnchorScope();
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    if (!TTI) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
                        << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      return TTI->areTypesABICompatible(
          CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
    };
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
                                UsedAssumedInformation)) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if for the given call site the associated argument is
    // passed to a callback where the privatization would be different.
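    // Illustrative sketch (hypothetical broker call): if the argument is
    // forwarded through a broker with callback metadata, e.g.,
    //
    //   call void @pthread_create(..., void (i8*)* @cb, i8* %arg)
    //
    // then %arg is also an argument of the abstract callback call to @cb, and
    // both positions have to agree on the privatizable type for the rewrite
    // to be sound.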
    auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
      SmallVector<const Use *, 4> CallbackUses;
      AbstractCallSite::getCallbackUses(CB, CallbackUses);
      for (const Use *U : CallbackUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CB.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
              *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
            DepClassTy::REQUIRED);
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getInstruction()->getCalledFunction()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// Given a type to privatize \p PrivType, collect the constituents (which
  /// are used) in \p ReplacementTypes.
  static void
  identifyReplacementTypes(Type *PrivType,
                           SmallVectorImpl<Type *> &ReplacementTypes) {
    // TODO: For now we expand the privatization type to the fullest which can
    //       lead to dead arguments that need to be removed later.
    assert(PrivType && "Expected privatizable type!");

    // Traverse the type, extract constituent types on the outermost level.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
        ReplacementTypes.push_back(PrivStructType->getElementType(u));
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      ReplacementTypes.append(PrivArrayType->getNumElements(),
                              PrivArrayType->getElementType());
    } else {
      ReplacementTypes.push_back(PrivType);
    }
  }

  /// Initialize \p Base according to the type \p PrivType at position \p IP.
  /// The values needed are taken from the arguments of \p F starting at
  /// position \p ArgNo.
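  /// For example (illustrative): with \p PrivType == { i32, i64 } and,
  /// assuming an { 0, 8 } struct layout, this emits pointers into \p Base at
  /// offsets 0 and 8 and stores F.getArg(ArgNo) respectively
  /// F.getArg(ArgNo + 1) through them.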
  static void createInitialization(Type *PrivType, Value &Base, Function &F,
                                   unsigned ArgNo, Instruction &IP) {
    assert(PrivType && "Expected privatizable type!");

    IRBuilder<NoFolder> IRB(&IP);
    const DataLayout &DL = F.getParent()->getDataLayout();

    // Traverse the type, build GEPs and stores.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
        Value *Ptr =
            constructPointer(PointeeTy, PrivType, &Base,
                             PrivStructLayout->getElementOffset(u), IRB, DL);
        new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
                                      u * PointeeTySize, IRB, DL);
        new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
      }
    } else {
      new StoreInst(F.getArg(ArgNo), &Base, &IP);
    }
  }

  /// Extract values from \p Base according to the type \p PrivType at the
  /// call position \p ACS. The values are appended to \p ReplacementValues.
  void createReplacementValues(Align Alignment, Type *PrivType,
                               AbstractCallSite ACS, Value *Base,
                               SmallVectorImpl<Value *> &ReplacementValues) {
    assert(Base && "Expected base value!");
    assert(PrivType && "Expected privatizable type!");
    Instruction *IP = ACS.getInstruction();

    IRBuilder<NoFolder> IRB(IP);
    const DataLayout &DL = IP->getModule()->getDataLayout();

    Type *PrivPtrType = PrivType->getPointerTo();
    if (Base->getType() != PrivPtrType)
      Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
          Base, PrivPtrType, "", ACS.getInstruction());

    // Traverse the type, build GEPs and loads.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Type *PointeeTy = PrivStructType->getElementType(u);
        Value *Ptr =
            constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
                             PrivStructLayout->getElementOffset(u), IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
                                      u * PointeeTySize, IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else {
      LoadInst *L = new LoadInst(PrivType, Base, "", IP);
      L->setAlignment(Alignment);
      ReplacementValues.push_back(L);
    }
  }

  /// See AbstractAttribute::manifest(...)
  ChangeStatus manifest(Attributor &A) override {
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    assert(PrivatizableType.getValue() && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
    // TODO: Be smarter about new allocas escaping into tail calls.
    SmallVector<CallInst *, 16> TailCalls;
    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(
            [&](Instruction &I) {
              CallInst &CI = cast<CallInst>(I);
              if (CI.isTailCall())
                TailCalls.push_back(&CI);
              return true;
            },
            *this, {Instruction::Call}, UsedAssumedInformation))
      return ChangeStatus::UNCHANGED;

    Argument *Arg = getAssociatedArgument();
    // Query AAAlign attribute for alignment of associated argument to
    // determine the best alignment of loads.
    const auto &AlignAA =
        A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);

    // Callback to repair the associated function. A new alloca is placed at
    // the beginning and initialized with the values passed through arguments.
    // The new alloca replaces the use of the old pointer argument.
    Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            Function &ReplacementFn, Function::arg_iterator ArgIt) {
          BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
          Instruction *IP = &*EntryBB.getFirstInsertionPt();
          const DataLayout &DL = IP->getModule()->getDataLayout();
          unsigned AS = DL.getAllocaAddrSpace();
          Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
                                           Arg->getName() + ".priv", IP);
          createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
                               ArgIt->getArgNo(), *IP);

          if (AI->getType() != Arg->getType())
            AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
                AI, Arg->getType(), "", IP);
          Arg->replaceAllUsesWith(AI);

          for (CallInst *CI : TailCalls)
            CI->setTailCall(false);
        };

    // Callback to repair a call site of the associated function. The elements
    // of the privatizable type are loaded prior to the call and passed to the
    // new function version.
    Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
        [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
                      AbstractCallSite ACS,
                      SmallVectorImpl<Value *> &NewArgOperands) {
          // When no alignment is specified for the load instruction,
          // natural alignment is assumed.
          createReplacementValues(
              assumeAligned(AlignAA.getAssumedAlign()),
              PrivatizableType.getValue(), ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
              NewArgOperands);
        };

    // Collect the types that will replace the privatizable type in the
    // function signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
                                           std::move(FnRepairCB),
                                           std::move(ACSRepairCB)))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  virtual void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
                     "updateImpl will not be called");
  }

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    Value *Obj = getUnderlyingObject(&getAssociatedValue());
    if (!Obj) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
      return nullptr;
    }

    if (auto *AI = dyn_cast<AllocaInst>(Obj))
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        if (CI->isOne())
          return AI->getAllocatedType();
    if (auto *Arg = dyn_cast<Argument>(Obj)) {
      auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
          *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
      if (PrivArgAA.isAssumedPrivatizablePtr())
        return PrivArgAA.getPrivatizableType();
    }

    LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
                         "alloca nor privatizable argument: "
                      << *Obj << "!\n");
    return nullptr;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteArgument final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr(Attribute::ByVal))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    const IRPosition &IRP = getIRPosition();
    auto &NoCaptureAA =
        A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
    if (!NoCaptureAA.isAssumedNoCapture()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
      return indicatePessimisticFixpoint();
    }

    auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
    if (!NoAliasAA.isAssumedNoAlias()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
      return indicatePessimisticFixpoint();
    }

    bool IsKnown;
    if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
      return indicatePessimisticFixpoint();
    }

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};
} // namespace

/// -------------------- Memory Behavior Attributes ----------------------------
/// Includes read-none, read-only, and write-only.
/// ----------------------------------------------------------------------------
namespace {
struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
  AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehavior(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(getIRPosition(), getState());
    AAMemoryBehavior::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
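  /// For example (illustrative): a `readonly` attribute on \p IRP adds
  /// NO_WRITES to the known bits, `readnone` adds NO_ACCESSES, and an
  /// instruction anchor that cannot write memory contributes NO_WRITES.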
  static void getKnownStateFromValue(const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_ACCESSES);
        break;
      case Attribute::ReadOnly:
        State.addKnownBits(NO_WRITES);
        break;
      case Attribute::WriteOnly:
        State.addKnownBits(NO_READS);
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }

    if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
      if (!I->mayReadFromMemory())
        State.addKnownBits(NO_READS);
      if (!I->mayWriteToMemory())
        State.addKnownBits(NO_WRITES);
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    else if (isAssumedReadOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
    else if (isAssumedWriteOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
      return ChangeStatus::UNCHANGED;

    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isAssumedReadNone())
      return "readnone";
    if (isAssumedReadOnly())
      return "readonly";
    if (isAssumedWriteOnly())
      return "writeonly";
    return "may-read/write";
  }

  /// The set of IR attributes AAMemoryBehavior deals with.
  static const Attribute::AttrKind AttrKinds[3];
};

const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};

/// Memory behavior attribute for a floating value.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use &U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
};

/// Memory behavior attribute for function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    //       can query it when we use has/getAttr. That would allow us to reuse
    //       the initialize of the base class here.
    bool HasByVal =
        IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);

    // Initialize the use vector with all direct uses of the associated value.
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
      indicatePessimisticFixpoint();
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: Pointer arguments are not supported on vectors of pointers yet.
    if (!getAssociatedValue().getType()->isPointerTy())
      return ChangeStatus::UNCHANGED;

    // TODO: From readattrs.ll: "inalloca parameters are always
    //       considered written"
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};

struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorArgument(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, nothing to do here.
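    // Illustrative sketch (hypothetical C call): for `printf("%d", x)` the
    // variadic operand `x` has no corresponding formal argument in the
    // callee, so getAssociatedArgument() below returns null and we give up.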
    Argument *Arg = getAssociatedArgument();
    if (!Arg) {
      indicatePessimisticFixpoint();
      return;
    }
    if (Arg->hasByValAttr()) {
      addKnownBits(NO_WRITES);
      removeKnownBits(NO_READS);
      removeAssumedBits(NO_READS);
    }
    AAMemoryBehaviorArgument::initialize(A);
    if (getAssociatedFunction()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CSARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CSARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CSARG_ATTR(writeonly)
  }
};

/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Function &F = cast<Function>(getAnchorValue());
    if (isAssumedReadNone()) {
      F.removeFnAttr(Attribute::ArgMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }
    return AAMemoryBehaviorImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};

/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};

ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has an own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      intersectAssumedBits(MemBehaviorAA.getAssumed());
      return !isAtFixpoint();
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    return !isAtFixpoint();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                          UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA =
      A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << " \n");

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to follow
  // the users of the load.
  if (isa<LoadInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(&U))
    return true;

  // If the use is a call argument known not to be captured, the users of the
  // call do not need to be visited because they have to be unrelated to the
  // input. Note that this check is not trivial even though we disallow general
  // capturing of the underlying argument. The reason is that the call might
  // capture the argument "through return", which we allow and for which we
  // need to check call users.
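  // Illustrative sketch (hypothetical IR): given
  //
  //   %q = call i8* @maybe_ret(i8* %p)
  //
  // %p can flow out "through return" as %q even if it is otherwise not
  // captured, so the users of the call must be followed unless the call site
  // argument is nocapture without the maybe-returned exception.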
  if (U.get()->getType()->isPointerTy()) {
    unsigned ArgNo = CB->getArgOperandNo(&U);
    const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
        *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
    return !ArgNoCaptureAA.isAssumedNoCapture();
  }

  return true;
}

void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
                                            const Instruction *UserI) {
  assert(UserI->mayReadOrWriteMemory());

  switch (UserI->getOpcode()) {
  default:
    // TODO: Handle all atomics and other side-effect operations we know of.
    break;
  case Instruction::Load:
    // Loads cause the NO_READS property to disappear.
    removeAssumedBits(NO_READS);
    return;

  case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. Note that while capturing was taken care of somewhere
    // else we need to deal with stores of the value that is not looked
    // through.
    if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
      removeAssumedBits(NO_WRITES);
    else
      indicatePessimisticFixpoint();
    return;

  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke: {
    // For call sites we look at the argument memory behavior attribute (this
    // could be recursive!) in order to restrict our own state.
    const auto *CB = cast<CallBase>(UserI);

    // Give up on operand bundles.
    if (CB->isBundleOperand(&U)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Calling a function does read the function pointer, maybe write it if the
    // function is self-modifying.
    if (CB->isCallee(&U)) {
      removeAssumedBits(NO_READS);
      break;
    }

    // Adjust the possible access behavior based on the information on the
    // argument.
    IRPosition Pos;
    if (U.get()->getType()->isPointerTy())
      Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
    else
      Pos = IRPosition::callsite_function(*CB);
    const auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
    // "assumed" has at most the same bits as the MemBehaviorAA assumed and at
    // least "known".
    intersectAssumedBits(MemBehaviorAA.getAssumed());
    return;
  }
  };

  // Generally, look at the "may-properties" and adjust the assumed state if we
  // did not trigger special handling before.
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblemem_or_argmemonly
/// ----------------------------------------------------------------------------

std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}

namespace {
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      AccessKind2Accesses[u] = nullptr;
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call the
    // destructor manually.
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      if (AccessKind2Accesses[u])
        AccessKind2Accesses[u]->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via
    // interprocedural constant propagation. It is unclear if this is the best
    // way but it is unlikely this will cause real performance problems. If we
    // are deriving attributes for the anchor function we even remove the
    // attribute in addition to ignoring it.
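    // Illustrative sketch (hypothetical IR): for an internal `argmemonly`
    // function f(i32* %p), interprocedural constant propagation may replace
    // %p at every use with a global @G; the accesses then hit global rather
    // than argument memory, so the pre-existing `argmemonly` cannot be
    // trusted while we still rewrite the function.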
    bool UseArgMemOnly = true;
    Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && A.isRunOn(*AnchorFn))
      UseArgMemOnly = !AnchorFn->hasLocalLinkage();

    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
        break;
      case Attribute::InaccessibleMemOnly:
        State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
        break;
      case Attribute::ArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::ArgMemOnly});
        break;
      case Attribute::InaccessibleMemOrArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(
              NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone()) {
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
      if (isAssumedInaccessibleMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
      else if (isAssumedArgMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
      else if (isAssumedInaccessibleOrArgMemOnly())
        Attrs.push_back(
            Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
    }
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);
    if (isAssumedReadNone())
      IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction will
    // become an access for all potential access kinds:
    // TODO: Add pointers for argmemonly and globals to improve the results of
    //       checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];

  /// Categorize the pointer arguments of \p CB that might access memory in
  /// \p AccessedLocs and update the state and access map accordingly.
  void
  categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
                                     AAMemoryLocation::StateType &AccessedLocs,
                                     bool &Changed);

  /// Return the kind(s) of location that may be accessed by \p V.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  SmallVector<Value *, 8> Objects;
  bool UsedAssumedInformation = false;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
                                       UsedAssumedInformation,
                                       /* Intraprocedural */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  for (Value *Obj : Objects) {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies
      //       performed on the call edge, though, we should. To make that
      //       happen we need to teach various passes, e.g., DSE, about the
      //       copy effect of a byval. That would also allow us to mark
      //       functions only accessing byval arguments as readnone again,
      //       arguably their accesses have no effect outside of the function,
      //       like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass so we won't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
7802       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7803         if (GVar->isConstant())
7804           continue;
7805 
7806       if (GV->hasLocalLinkage())
7807         MLK = NO_GLOBAL_INTERNAL_MEM;
7808       else
7809         MLK = NO_GLOBAL_EXTERNAL_MEM;
7810     } else if (isa<ConstantPointerNull>(Obj) &&
7811                !NullPointerIsDefined(getAssociatedFunction(),
7812                                      Ptr.getType()->getPointerAddressSpace())) {
7813       continue;
7814     } else if (isa<AllocaInst>(Obj)) {
7815       MLK = NO_LOCAL_MEM;
7816     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7817       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7818           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7819       if (NoAliasAA.isAssumedNoAlias())
7820         MLK = NO_MALLOCED_MEM;
7821       else
7822         MLK = NO_UNKOWN_MEM;
7823     } else {
7824       MLK = NO_UNKOWN_MEM;
7825     }
7826 
7827     assert(MLK != NO_LOCATIONS && "No location specified!");
7828     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7829                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7830                       << "\n");
7831     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7832                               getAccessKindFromInst(&I));
7833   }
7834 
7835   LLVM_DEBUG(
7836       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7837              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7838 }
7839 
7840 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7841     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7842     bool &Changed) {
7843   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7844 
7845     // Skip non-pointer arguments.
7846     const Value *ArgOp = CB.getArgOperand(ArgNo);
7847     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7848       continue;
7849 
7850     // Skip readnone arguments.
7851     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7852     const auto &ArgOpMemLocationAA =
7853         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7854 
7855     if (ArgOpMemLocationAA.isAssumedReadNone())
7856       continue;
7857 
7858     // Categorize potentially accessed pointer arguments as if there were an
7859     // access instruction with them as the pointer operand.
7860     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7861   }
7862 }
7863 
7864 AAMemoryLocation::MemoryLocationsKind
7865 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7866                                                   bool &Changed) {
7867   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7868                     << I << "\n");
7869 
7870   AAMemoryLocation::StateType AccessedLocs;
7871   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7872 
7873   if (auto *CB = dyn_cast<CallBase>(&I)) {
7874 
7875     // First check if we assume any accessed memory is visible.
7876     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7877         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7878     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7879                       << " [" << CBMemLocationAA << "]\n");
7880 
7881     if (CBMemLocationAA.isAssumedReadNone())
7882       return NO_LOCATIONS;
7883 
7884     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7885       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7886                                 Changed, getAccessKindFromInst(&I));
7887       return AccessedLocs.getAssumed();
7888     }
7889 
7890     uint32_t CBAssumedNotAccessedLocs =
7891         CBMemLocationAA.getAssumedNotAccessedLocation();
7892 
7893     // Set the argmemonly and global bit as we handle them separately below.
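    // A sketch of the masking below, with illustrative values: for a callee
    // that only accesses argument memory, CBAssumedNotAccessedLocs has all
    // bits but NO_ARGUMENT_MEM set. After OR-ing in NO_ARGUMENT_MEM and
    // NO_GLOBAL_MEM, the generic loop records nothing, and argument as well
    // as global accesses are instead reconstructed with pointer information
    // in the dedicated handling further down.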
7894 uint32_t CBAssumedNotAccessedLocsNoArgMem = 7895 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 7896 7897 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 7898 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 7899 continue; 7900 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 7901 getAccessKindFromInst(&I)); 7902 } 7903 7904 // Now handle global memory if it might be accessed. This is slightly tricky 7905 // as NO_GLOBAL_MEM has multiple bits set. 7906 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 7907 if (HasGlobalAccesses) { 7908 auto AccessPred = [&](const Instruction *, const Value *Ptr, 7909 AccessKind Kind, MemoryLocationsKind MLK) { 7910 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 7911 getAccessKindFromInst(&I)); 7912 return true; 7913 }; 7914 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 7915 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 7916 return AccessedLocs.getWorstState(); 7917 } 7918 7919 LLVM_DEBUG( 7920 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 7921 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7922 7923 // Now handle argument memory if it might be accessed. 7924 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 7925 if (HasArgAccesses) 7926 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed); 7927 7928 LLVM_DEBUG( 7929 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 7930 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7931 7932 return AccessedLocs.getAssumed(); 7933 } 7934 7935 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 7936 LLVM_DEBUG( 7937 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 7938 << I << " [" << *Ptr << "]\n"); 7939 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 7940 return AccessedLocs.getAssumed(); 7941 } 7942 7943 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 7944 << I << "\n"); 7945 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 7946 getAccessKindFromInst(&I)); 7947 return AccessedLocs.getAssumed(); 7948 } 7949 7950 /// An AA to represent the memory behavior function attributes. 7951 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 7952 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 7953 : AAMemoryLocationImpl(IRP, A) {} 7954 7955 /// See AbstractAttribute::updateImpl(Attributor &A). 7956 virtual ChangeStatus updateImpl(Attributor &A) override { 7957 7958 const auto &MemBehaviorAA = 7959 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 7960 if (MemBehaviorAA.isAssumedReadNone()) { 7961 if (MemBehaviorAA.isKnownReadNone()) 7962 return indicateOptimisticFixpoint(); 7963 assert(isAssumedReadNone() && 7964 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 7965 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 7966 return ChangeStatus::UNCHANGED; 7967 } 7968 7969 // The current assumed state used to determine a change. 
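    // (We report CHANGED below iff CheckRWInst removed any of the assumed
    // bits snapshotted here, so the Attributor keeps iterating until the
    // location set stabilizes.)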
7970     auto AssumedState = getAssumed();
7971     bool Changed = false;
7972 
7973     auto CheckRWInst = [&](Instruction &I) {
7974       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7975       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7976                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7977       removeAssumedBits(inverseLocation(MLK, false, false));
7978       // Stop once only the valid bit is set in the *not assumed location*,
7979       // thus once we don't actually exclude any memory locations in the state.
7980       return getAssumedNotAccessedLocation() != VALID_STATE;
7981     };
7982 
7983     bool UsedAssumedInformation = false;
7984     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7985                                             UsedAssumedInformation))
7986       return indicatePessimisticFixpoint();
7987 
7988     Changed |= AssumedState != getAssumed();
7989     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7990   }
7991 
7992   /// See AbstractAttribute::trackStatistics()
7993   void trackStatistics() const override {
7994     if (isAssumedReadNone())
7995       STATS_DECLTRACK_FN_ATTR(readnone)
7996     else if (isAssumedArgMemOnly())
7997       STATS_DECLTRACK_FN_ATTR(argmemonly)
7998     else if (isAssumedInaccessibleMemOnly())
7999       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8000     else if (isAssumedInaccessibleOrArgMemOnly())
8001       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8002   }
8003 };
8004 
8005 /// AAMemoryLocation attribute for call sites.
8006 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8007   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8008       : AAMemoryLocationImpl(IRP, A) {}
8009 
8010   /// See AbstractAttribute::initialize(...).
8011   void initialize(Attributor &A) override {
8012     AAMemoryLocationImpl::initialize(A);
8013     Function *F = getAssociatedFunction();
8014     if (!F || F->isDeclaration())
8015       indicatePessimisticFixpoint();
8016   }
8017 
8018   /// See AbstractAttribute::updateImpl(...).
8019   ChangeStatus updateImpl(Attributor &A) override {
8020     // TODO: Once we have call site specific value information we can provide
8021     //       call site specific liveness information and then it makes
8022     //       sense to specialize attributes for call site arguments instead of
8023     //       redirecting requests to the callee argument.
8024     Function *F = getAssociatedFunction();
8025     const IRPosition &FnPos = IRPosition::function(*F);
8026     auto &FnAA =
8027         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8028     bool Changed = false;
8029     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8030                           AccessKind Kind, MemoryLocationsKind MLK) {
8031       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8032                                 getAccessKindFromInst(I));
8033       return true;
8034     };
8035     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8036       return indicatePessimisticFixpoint();
8037     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8038   }
8039 
8040   /// See AbstractAttribute::trackStatistics()
8041   void trackStatistics() const override {
8042     if (isAssumedReadNone())
8043       STATS_DECLTRACK_CS_ATTR(readnone)
8044   }
8045 };
8046 } // namespace
8047 
8048 /// ------------------ Value Constant Range Attribute -------------------------
8049 
8050 namespace {
8051 struct AAValueConstantRangeImpl : AAValueConstantRange {
8052   using StateType = IntegerRangeState;
8053   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8054       : AAValueConstantRange(IRP, A) {}
8055 
8056   /// See AbstractAttribute::initialize(..).
8057   void initialize(Attributor &A) override {
8058     if (A.hasSimplificationCallback(getIRPosition())) {
8059       indicatePessimisticFixpoint();
8060       return;
8061     }
8062 
8063     // Intersect a range given by SCEV.
8064     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8065 
8066     // Intersect a range given by LVI.
8067     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8068   }
8069 
8070   /// See AbstractAttribute::getAsStr().
8071   const std::string getAsStr() const override {
8072     std::string Str;
8073     llvm::raw_string_ostream OS(Str);
8074     OS << "range(" << getBitWidth() << ")<";
8075     getKnown().print(OS);
8076     OS << " / ";
8077     getAssumed().print(OS);
8078     OS << ">";
8079     return OS.str();
8080   }
8081 
8082   /// Helper function to get a SCEV expr for the associated value at program
8083   /// point \p I.
8084   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8085     if (!getAnchorScope())
8086       return nullptr;
8087 
8088     ScalarEvolution *SE =
8089         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8090             *getAnchorScope());
8091 
8092     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8093         *getAnchorScope());
8094 
8095     if (!SE || !LI)
8096       return nullptr;
8097 
8098     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8099     if (!I)
8100       return S;
8101 
8102     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8103   }
8104 
8105   /// Helper function to get a range from SCEV for the associated value at
8106   /// program point \p I.
8107   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8108                                          const Instruction *I = nullptr) const {
8109     if (!getAnchorScope())
8110       return getWorstState(getBitWidth());
8111 
8112     ScalarEvolution *SE =
8113         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8114             *getAnchorScope());
8115 
8116     const SCEV *S = getSCEV(A, I);
8117     if (!SE || !S)
8118       return getWorstState(getBitWidth());
8119 
8120     return SE->getUnsignedRange(S);
8121   }
8122 
8123   /// Helper function to get a range from LVI for the associated value at
8124   /// program point \p I.
8125   ConstantRange
8126   getConstantRangeFromLVI(Attributor &A,
8127                           const Instruction *CtxI = nullptr) const {
8128     if (!getAnchorScope())
8129       return getWorstState(getBitWidth());
8130 
8131     LazyValueInfo *LVI =
8132         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8133             *getAnchorScope());
8134 
8135     if (!LVI || !CtxI)
8136       return getWorstState(getBitWidth());
8137     return LVI->getConstantRange(&getAssociatedValue(),
8138                                  const_cast<Instruction *>(CtxI));
8139   }
8140 
8141   /// Return true if \p CtxI is valid for querying outside analyses.
8142   /// This basically makes sure we do not ask intra-procedural analyses
8143   /// about a context in the wrong function or a context that violates
8144   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8145   /// if the original context of this AA is OK or should be considered invalid.
8146   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8147                                                const Instruction *CtxI,
8148                                                bool AllowAACtxI) const {
8149     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8150       return false;
8151 
8152     // Our context might be in a different function, which neither of the
8153     // intra-procedural analyses (ScalarEvolution and LazyValueInfo) can handle.
8154     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8155       return false;
8156 
8157     // If the context is not dominated by the value there are paths to the
8158     // context that do not define the value. This cannot be handled by
8159     // LazyValueInfo so we need to bail.
8160     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8161       InformationCache &InfoCache = A.getInfoCache();
8162       const DominatorTree *DT =
8163           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8164               *I->getFunction());
8165       return DT && DT->dominates(I, CtxI);
8166     }
8167 
8168     return true;
8169   }
8170 
8171   /// See AAValueConstantRange::getKnownConstantRange(..).
8172   ConstantRange
8173   getKnownConstantRange(Attributor &A,
8174                         const Instruction *CtxI = nullptr) const override {
8175     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8176                                                  /* AllowAACtxI */ false))
8177       return getKnown();
8178 
8179     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8180     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8181     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8182   }
8183 
8184   /// See AAValueConstantRange::getAssumedConstantRange(..).
8185   ConstantRange
8186   getAssumedConstantRange(Attributor &A,
8187                           const Instruction *CtxI = nullptr) const override {
8188     // TODO: Make SCEV use Attributor assumption.
8189     //       We may be able to bound a variable range via assumptions in
8190     //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
8191     //       to evolve to x^2 + x, then we can say that y is in [2, 12].
8192     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8193                                                  /* AllowAACtxI */ false))
8194       return getAssumed();
8195 
8196     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8197     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8198     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8199   }
8200 
8201   /// Helper function to create MDNode for range metadata.
8202   static MDNode *
8203   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8204                             const ConstantRange &AssumedConstantRange) {
8205     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8206                                   Ty, AssumedConstantRange.getLower())),
8207                               ConstantAsMetadata::get(ConstantInt::get(
8208                                   Ty, AssumedConstantRange.getUpper()))};
8209     return MDNode::get(Ctx, LowAndHigh);
8210   }
8211 
8212   /// Return true if \p Assumed is included in \p KnownRanges.
8213   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8214 
8215     if (Assumed.isFullSet())
8216       return false;
8217 
8218     if (!KnownRanges)
8219       return true;
8220 
8221     // If multiple ranges are annotated in IR, we give up annotating the
8222     // assumed range for now.
8223 
8224     // TODO: If there exists a known range which contains the assumed range,
8225     //       we can say the assumed range is better.
8226     if (KnownRanges->getNumOperands() > 2)
8227       return false;
8228 
8229     ConstantInt *Lower =
8230         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8231     ConstantInt *Upper =
8232         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8233 
8234     ConstantRange Known(Lower->getValue(), Upper->getValue());
8235     return Known.contains(Assumed) && Known != Assumed;
8236   }
8237 
8238   /// Helper function to set range metadata.
8239 static bool 8240 setRangeMetadataIfisBetterRange(Instruction *I, 8241 const ConstantRange &AssumedConstantRange) { 8242 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 8243 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 8244 if (!AssumedConstantRange.isEmptySet()) { 8245 I->setMetadata(LLVMContext::MD_range, 8246 getMDNodeForConstantRange(I->getType(), I->getContext(), 8247 AssumedConstantRange)); 8248 return true; 8249 } 8250 } 8251 return false; 8252 } 8253 8254 /// See AbstractAttribute::manifest() 8255 ChangeStatus manifest(Attributor &A) override { 8256 ChangeStatus Changed = ChangeStatus::UNCHANGED; 8257 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 8258 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 8259 8260 auto &V = getAssociatedValue(); 8261 if (!AssumedConstantRange.isEmptySet() && 8262 !AssumedConstantRange.isSingleElement()) { 8263 if (Instruction *I = dyn_cast<Instruction>(&V)) { 8264 assert(I == getCtxI() && "Should not annotate an instruction which is " 8265 "not the context instruction"); 8266 if (isa<CallInst>(I) || isa<LoadInst>(I)) 8267 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 8268 Changed = ChangeStatus::CHANGED; 8269 } 8270 } 8271 8272 return Changed; 8273 } 8274 }; 8275 8276 struct AAValueConstantRangeArgument final 8277 : AAArgumentFromCallSiteArguments< 8278 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8279 true /* BridgeCallBaseContext */> { 8280 using Base = AAArgumentFromCallSiteArguments< 8281 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8282 true /* BridgeCallBaseContext */>; 8283 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 8284 : Base(IRP, A) {} 8285 8286 /// See AbstractAttribute::initialize(..). 8287 void initialize(Attributor &A) override { 8288 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8289 indicatePessimisticFixpoint(); 8290 } else { 8291 Base::initialize(A); 8292 } 8293 } 8294 8295 /// See AbstractAttribute::trackStatistics() 8296 void trackStatistics() const override { 8297 STATS_DECLTRACK_ARG_ATTR(value_range) 8298 } 8299 }; 8300 8301 struct AAValueConstantRangeReturned 8302 : AAReturnedFromReturnedValues<AAValueConstantRange, 8303 AAValueConstantRangeImpl, 8304 AAValueConstantRangeImpl::StateType, 8305 /* PropogateCallBaseContext */ true> { 8306 using Base = 8307 AAReturnedFromReturnedValues<AAValueConstantRange, 8308 AAValueConstantRangeImpl, 8309 AAValueConstantRangeImpl::StateType, 8310 /* PropogateCallBaseContext */ true>; 8311 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 8312 : Base(IRP, A) {} 8313 8314 /// See AbstractAttribute::initialize(...). 8315 void initialize(Attributor &A) override {} 8316 8317 /// See AbstractAttribute::trackStatistics() 8318 void trackStatistics() const override { 8319 STATS_DECLTRACK_FNRET_ATTR(value_range) 8320 } 8321 }; 8322 8323 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 8324 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 8325 : AAValueConstantRangeImpl(IRP, A) {} 8326 8327 /// See AbstractAttribute::initialize(...). 
8328   void initialize(Attributor &A) override {
8329     AAValueConstantRangeImpl::initialize(A);
8330     if (isAtFixpoint())
8331       return;
8332 
8333     Value &V = getAssociatedValue();
8334 
8335     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8336       unionAssumed(ConstantRange(C->getValue()));
8337       indicateOptimisticFixpoint();
8338       return;
8339     }
8340 
8341     if (isa<UndefValue>(&V)) {
8342       // Collapse the undef state to 0.
8343       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8344       indicateOptimisticFixpoint();
8345       return;
8346     }
8347 
8348     if (isa<CallBase>(&V))
8349       return;
8350 
8351     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8352       return;
8353 
8354     // If it is a load instruction with range metadata, use it.
8355     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8356       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8357         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8358         return;
8359       }
8360 
8361     // We can work with PHI and select instructions as we traverse their
8362     // operands during update.
8363     if (isa<SelectInst>(V) || isa<PHINode>(V))
8364       return;
8365 
8366     // Otherwise we give up.
8367     indicatePessimisticFixpoint();
8368 
8369     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8370                       << getAssociatedValue() << "\n");
8371   }
8372 
8373   bool calculateBinaryOperator(
8374       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8375       const Instruction *CtxI,
8376       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8377     Value *LHS = BinOp->getOperand(0);
8378     Value *RHS = BinOp->getOperand(1);
8379 
8380     // Simplify the operands first.
8381     bool UsedAssumedInformation = false;
8382     const auto &SimplifiedLHS =
8383         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8384                                *this, UsedAssumedInformation);
8385     if (!SimplifiedLHS.hasValue())
8386       return true;
8387     if (!SimplifiedLHS.getValue())
8388       return false;
8389     LHS = *SimplifiedLHS;
8390 
8391     const auto &SimplifiedRHS =
8392         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8393                                *this, UsedAssumedInformation);
8394     if (!SimplifiedRHS.hasValue())
8395       return true;
8396     if (!SimplifiedRHS.getValue())
8397       return false;
8398     RHS = *SimplifiedRHS;
8399 
8400     // TODO: Allow non-integers as well.
8401     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8402       return false;
8403 
8404     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8405         *this, IRPosition::value(*LHS, getCallBaseContext()),
8406         DepClassTy::REQUIRED);
8407     QuerriedAAs.push_back(&LHSAA);
8408     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8409 
8410     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8411         *this, IRPosition::value(*RHS, getCallBaseContext()),
8412         DepClassTy::REQUIRED);
8413     QuerriedAAs.push_back(&RHSAA);
8414     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8415 
8416     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8417 
8418     T.unionAssumed(AssumedRange);
8419 
8420     // TODO: Track a known state too.
8421 
8422     return T.isValidState();
8423   }
8424 
8425   bool calculateCastInst(
8426       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8427       const Instruction *CtxI,
8428       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8429     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8430     // TODO: Allow non-integers as well.
8431     Value *OpV = CastI->getOperand(0);
8432 
8433     // Simplify the operand first.
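    // As with the binary operator case above: a missing simplification result
    // means "not known yet", so we optimistically return true and retry in a
    // later iteration, while a null result makes us give up on this cast.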
8434 bool UsedAssumedInformation = false; 8435 const auto &SimplifiedOpV = 8436 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()), 8437 *this, UsedAssumedInformation); 8438 if (!SimplifiedOpV.hasValue()) 8439 return true; 8440 if (!SimplifiedOpV.getValue()) 8441 return false; 8442 OpV = *SimplifiedOpV; 8443 8444 if (!OpV->getType()->isIntegerTy()) 8445 return false; 8446 8447 auto &OpAA = A.getAAFor<AAValueConstantRange>( 8448 *this, IRPosition::value(*OpV, getCallBaseContext()), 8449 DepClassTy::REQUIRED); 8450 QuerriedAAs.push_back(&OpAA); 8451 T.unionAssumed( 8452 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 8453 return T.isValidState(); 8454 } 8455 8456 bool 8457 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 8458 const Instruction *CtxI, 8459 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8460 Value *LHS = CmpI->getOperand(0); 8461 Value *RHS = CmpI->getOperand(1); 8462 8463 // Simplify the operands first. 8464 bool UsedAssumedInformation = false; 8465 const auto &SimplifiedLHS = 8466 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8467 *this, UsedAssumedInformation); 8468 if (!SimplifiedLHS.hasValue()) 8469 return true; 8470 if (!SimplifiedLHS.getValue()) 8471 return false; 8472 LHS = *SimplifiedLHS; 8473 8474 const auto &SimplifiedRHS = 8475 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8476 *this, UsedAssumedInformation); 8477 if (!SimplifiedRHS.hasValue()) 8478 return true; 8479 if (!SimplifiedRHS.getValue()) 8480 return false; 8481 RHS = *SimplifiedRHS; 8482 8483 // TODO: Allow non integers as well. 8484 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8485 return false; 8486 8487 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8488 *this, IRPosition::value(*LHS, getCallBaseContext()), 8489 DepClassTy::REQUIRED); 8490 QuerriedAAs.push_back(&LHSAA); 8491 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8492 *this, IRPosition::value(*RHS, getCallBaseContext()), 8493 DepClassTy::REQUIRED); 8494 QuerriedAAs.push_back(&RHSAA); 8495 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8496 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8497 8498 // If one of them is empty set, we can't decide. 8499 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 8500 return true; 8501 8502 bool MustTrue = false, MustFalse = false; 8503 8504 auto AllowedRegion = 8505 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 8506 8507 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 8508 MustFalse = true; 8509 8510 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange)) 8511 MustTrue = true; 8512 8513 assert((!MustTrue || !MustFalse) && 8514 "Either MustTrue or MustFalse should be false!"); 8515 8516 if (MustTrue) 8517 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 8518 else if (MustFalse) 8519 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 8520 else 8521 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 8522 8523 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 8524 << " " << RHSAA << "\n"); 8525 8526 // TODO: Track a known state too. 8527 return T.isValidState(); 8528 } 8529 8530 /// See AbstractAttribute::updateImpl(...). 
8531   ChangeStatus updateImpl(Attributor &A) override {
8532     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8533                             IntegerRangeState &T, bool Stripped) -> bool {
8534       Instruction *I = dyn_cast<Instruction>(&V);
8535       if (!I || isa<CallBase>(I)) {
8536 
8537         // Simplify the operand first.
8538         bool UsedAssumedInformation = false;
8539         const auto &SimplifiedOpV =
8540             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8541                                    *this, UsedAssumedInformation);
8542         if (!SimplifiedOpV.hasValue())
8543           return true;
8544         if (!SimplifiedOpV.getValue())
8545           return false;
8546         Value *VPtr = *SimplifiedOpV;
8547 
8548         // If the value is not an instruction, we query the AA via the Attributor.
8549         const auto &AA = A.getAAFor<AAValueConstantRange>(
8550             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8551             DepClassTy::REQUIRED);
8552 
8553         // The clamp operator is not used so we can utilize the program point CtxI.
8554         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8555 
8556         return T.isValidState();
8557       }
8558 
8559       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8560       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8561         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8562           return false;
8563       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8564         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8565           return false;
8566       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8567         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8568           return false;
8569       } else {
8570         // Give up with other instructions.
8571         // TODO: Add other instructions
8572 
8573         T.indicatePessimisticFixpoint();
8574         return false;
8575       }
8576 
8577       // Catch circular reasoning in a pessimistic way for now.
8578       // TODO: Check how the range evolves and if we stripped anything, see also
8579       //       AADereferenceable or AAAlign for similar situations.
8580       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8581         if (QueriedAA != this)
8582           continue;
8583         // If we are in a steady state we do not need to worry.
8584         if (T.getAssumed() == getState().getAssumed())
8585           continue;
8586         T.indicatePessimisticFixpoint();
8587       }
8588 
8589       return T.isValidState();
8590     };
8591 
8592     IntegerRangeState T(getBitWidth());
8593 
8594     bool UsedAssumedInformation = false;
8595     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8596                                                   VisitValueCB, getCtxI(),
8597                                                   UsedAssumedInformation,
8598                                                   /* UseValueSimplify */ false))
8599       return indicatePessimisticFixpoint();
8600 
8601     // Ensure that long def-use chains can't cause circular reasoning either by
8602     // introducing a cutoff below.
8603     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8604       return ChangeStatus::UNCHANGED;
8605     if (++NumChanges > MaxNumChanges) {
8606       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8607                         << " changes but only " << MaxNumChanges
8608                         << " are allowed to avoid cyclic reasoning.");
8609       return indicatePessimisticFixpoint();
8610     }
8611     return ChangeStatus::CHANGED;
8612   }
8613 
8614   /// See AbstractAttribute::trackStatistics()
8615   void trackStatistics() const override {
8616     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8617   }
8618 
8619   /// Tracker to bail after too many widening steps of the constant range.
8620   int NumChanges = 0;
8621 
8622   /// Upper bound for the number of allowed changes (=widening steps) for the
8623   /// constant range before we give up.
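  /// For example, purely to illustrate the cutoff: with MaxNumChanges == 5, a
  /// range that keeps widening on every update is forced to a pessimistic
  /// fixpoint after five CHANGED iterations (see updateImpl above).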
8624   static constexpr int MaxNumChanges = 5;
8625 };
8626 
8627 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8628   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8629       : AAValueConstantRangeImpl(IRP, A) {}
8630 
8631   /// See AbstractAttribute::updateImpl(...).
8632   ChangeStatus updateImpl(Attributor &A) override {
8633     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8634                      "not be called");
8635   }
8636 
8637   /// See AbstractAttribute::trackStatistics()
8638   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8639 };
8640 
8641 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8642   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8643       : AAValueConstantRangeFunction(IRP, A) {}
8644 
8645   /// See AbstractAttribute::trackStatistics()
8646   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8647 };
8648 
8649 struct AAValueConstantRangeCallSiteReturned
8650     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8651                                      AAValueConstantRangeImpl,
8652                                      AAValueConstantRangeImpl::StateType,
8653                                      /* IntroduceCallBaseContext */ true> {
8654   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8655       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8656                                        AAValueConstantRangeImpl,
8657                                        AAValueConstantRangeImpl::StateType,
8658                                        /* IntroduceCallBaseContext */ true>(IRP,
8659                                                                             A) {
8660   }
8661 
8662   /// See AbstractAttribute::initialize(...).
8663   void initialize(Attributor &A) override {
8664     // If it is a call instruction with range metadata, use the metadata.
8665     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8666       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8667         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8668 
8669     AAValueConstantRangeImpl::initialize(A);
8670   }
8671 
8672   /// See AbstractAttribute::trackStatistics()
8673   void trackStatistics() const override {
8674     STATS_DECLTRACK_CSRET_ATTR(value_range)
8675   }
8676 };
8677 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8678   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8679       : AAValueConstantRangeFloating(IRP, A) {}
8680 
8681   /// See AbstractAttribute::manifest()
8682   ChangeStatus manifest(Attributor &A) override {
8683     return ChangeStatus::UNCHANGED;
8684   }
8685 
8686   /// See AbstractAttribute::trackStatistics()
8687   void trackStatistics() const override {
8688     STATS_DECLTRACK_CSARG_ATTR(value_range)
8689   }
8690 };
8691 } // namespace
8692 
8693 /// ------------------ Potential Values Attribute -------------------------
8694 
8695 namespace {
8696 struct AAPotentialValuesImpl : AAPotentialValues {
8697   using StateType = PotentialConstantIntValuesState;
8698 
8699   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8700       : AAPotentialValues(IRP, A) {}
8701 
8702   /// See AbstractAttribute::initialize(..).
8703   void initialize(Attributor &A) override {
8704     if (A.hasSimplificationCallback(getIRPosition()))
8705       indicatePessimisticFixpoint();
8706     else
8707       AAPotentialValues::initialize(A);
8708   }
8709 
8710   /// See AbstractAttribute::getAsStr().
8711   const std::string getAsStr() const override {
8712     std::string Str;
8713     llvm::raw_string_ostream OS(Str);
8714     OS << getState();
8715     return OS.str();
8716   }
8717 
8718   /// See AbstractAttribute::updateImpl(...).
8719   ChangeStatus updateImpl(Attributor &A) override {
8720     return indicatePessimisticFixpoint();
8721   }
8722 };
8723 
8724 struct AAPotentialValuesArgument final
8725     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8726                                       PotentialConstantIntValuesState> {
8727   using Base =
8728       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8729                                       PotentialConstantIntValuesState>;
8730   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8731       : Base(IRP, A) {}
8732 
8733   /// See AbstractAttribute::initialize(..).
8734   void initialize(Attributor &A) override {
8735     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8736       indicatePessimisticFixpoint();
8737     } else {
8738       Base::initialize(A);
8739     }
8740   }
8741 
8742   /// See AbstractAttribute::trackStatistics()
8743   void trackStatistics() const override {
8744     STATS_DECLTRACK_ARG_ATTR(potential_values)
8745   }
8746 };
8747 
8748 struct AAPotentialValuesReturned
8749     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8750   using Base =
8751       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8752   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8753       : Base(IRP, A) {}
8754 
8755   /// See AbstractAttribute::trackStatistics()
8756   void trackStatistics() const override {
8757     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8758   }
8759 };
8760 
8761 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8762   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8763       : AAPotentialValuesImpl(IRP, A) {}
8764 
8765   /// See AbstractAttribute::initialize(..).
8766   void initialize(Attributor &A) override {
8767     AAPotentialValuesImpl::initialize(A);
8768     if (isAtFixpoint())
8769       return;
8770 
8771     Value &V = getAssociatedValue();
8772 
8773     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8774       unionAssumed(C->getValue());
8775       indicateOptimisticFixpoint();
8776       return;
8777     }
8778 
8779     if (isa<UndefValue>(&V)) {
8780       unionAssumedWithUndef();
8781       indicateOptimisticFixpoint();
8782       return;
8783     }
8784 
8785     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8786       return;
8787 
8788     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8789       return;
8790 
8791     indicatePessimisticFixpoint();
8792 
8793     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8794                       << getAssociatedValue() << "\n");
8795   }
8796 
8797   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8798                                 const APInt &RHS) {
8799     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8800   }
8801 
8802   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8803                                  uint32_t ResultBitWidth) {
8804     Instruction::CastOps CastOp = CI->getOpcode();
8805     switch (CastOp) {
8806     default:
8807       llvm_unreachable("unsupported or not integer cast");
8808     case Instruction::Trunc:
8809       return Src.trunc(ResultBitWidth);
8810     case Instruction::SExt:
8811       return Src.sext(ResultBitWidth);
8812     case Instruction::ZExt:
8813       return Src.zext(ResultBitWidth);
8814     case Instruction::BitCast:
8815       return Src;
8816     }
8817   }
8818 
8819   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8820                                        const APInt &LHS, const APInt &RHS,
8821                                        bool &SkipOperation, bool &Unsupported) {
8822     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8823     // Unsupported is set to true when the binary operator is not supported.
8824     // SkipOperation is set to true when UB occurs with the given operand pair
8825     // (LHS, RHS).
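    // E.g., `udiv LHS, 0` would be UB, so SkipOperation is set and that pair
    // contributes no potential value; an opcode not handled below, such as
    // FRem, takes the default path and sets Unsupported instead.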
8826 // TODO: we should look at nsw and nuw keywords to handle operations 8827 // that create poison or undef value. 8828 switch (BinOpcode) { 8829 default: 8830 Unsupported = true; 8831 return LHS; 8832 case Instruction::Add: 8833 return LHS + RHS; 8834 case Instruction::Sub: 8835 return LHS - RHS; 8836 case Instruction::Mul: 8837 return LHS * RHS; 8838 case Instruction::UDiv: 8839 if (RHS.isZero()) { 8840 SkipOperation = true; 8841 return LHS; 8842 } 8843 return LHS.udiv(RHS); 8844 case Instruction::SDiv: 8845 if (RHS.isZero()) { 8846 SkipOperation = true; 8847 return LHS; 8848 } 8849 return LHS.sdiv(RHS); 8850 case Instruction::URem: 8851 if (RHS.isZero()) { 8852 SkipOperation = true; 8853 return LHS; 8854 } 8855 return LHS.urem(RHS); 8856 case Instruction::SRem: 8857 if (RHS.isZero()) { 8858 SkipOperation = true; 8859 return LHS; 8860 } 8861 return LHS.srem(RHS); 8862 case Instruction::Shl: 8863 return LHS.shl(RHS); 8864 case Instruction::LShr: 8865 return LHS.lshr(RHS); 8866 case Instruction::AShr: 8867 return LHS.ashr(RHS); 8868 case Instruction::And: 8869 return LHS & RHS; 8870 case Instruction::Or: 8871 return LHS | RHS; 8872 case Instruction::Xor: 8873 return LHS ^ RHS; 8874 } 8875 } 8876 8877 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8878 const APInt &LHS, const APInt &RHS) { 8879 bool SkipOperation = false; 8880 bool Unsupported = false; 8881 APInt Result = 8882 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8883 if (Unsupported) 8884 return false; 8885 // If SkipOperation is true, we can ignore this operand pair (L, R). 8886 if (!SkipOperation) 8887 unionAssumed(Result); 8888 return isValidState(); 8889 } 8890 8891 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8892 auto AssumedBefore = getAssumed(); 8893 Value *LHS = ICI->getOperand(0); 8894 Value *RHS = ICI->getOperand(1); 8895 8896 // Simplify the operands first. 8897 bool UsedAssumedInformation = false; 8898 const auto &SimplifiedLHS = 8899 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8900 *this, UsedAssumedInformation); 8901 if (!SimplifiedLHS.hasValue()) 8902 return ChangeStatus::UNCHANGED; 8903 if (!SimplifiedLHS.getValue()) 8904 return indicatePessimisticFixpoint(); 8905 LHS = *SimplifiedLHS; 8906 8907 const auto &SimplifiedRHS = 8908 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8909 *this, UsedAssumedInformation); 8910 if (!SimplifiedRHS.hasValue()) 8911 return ChangeStatus::UNCHANGED; 8912 if (!SimplifiedRHS.getValue()) 8913 return indicatePessimisticFixpoint(); 8914 RHS = *SimplifiedRHS; 8915 8916 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8917 return indicatePessimisticFixpoint(); 8918 8919 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8920 DepClassTy::REQUIRED); 8921 if (!LHSAA.isValidState()) 8922 return indicatePessimisticFixpoint(); 8923 8924 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8925 DepClassTy::REQUIRED); 8926 if (!RHSAA.isValidState()) 8927 return indicatePessimisticFixpoint(); 8928 8929 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 8930 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 8931 8932 // TODO: make use of undef flag to limit potential values aggressively. 
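    // Worked example with hypothetical sets: for `icmp eq` with LHS potential
    // values {3, 4} and RHS {4}, the pairs yield both a false and a true
    // result, so MaybeTrue and MaybeFalse are both set and we give up below.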
8933 bool MaybeTrue = false, MaybeFalse = false; 8934 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8935 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 8936 // The result of any comparison between undefs can be soundly replaced 8937 // with undef. 8938 unionAssumedWithUndef(); 8939 } else if (LHSAA.undefIsContained()) { 8940 for (const APInt &R : RHSAAPVS) { 8941 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8942 MaybeTrue |= CmpResult; 8943 MaybeFalse |= !CmpResult; 8944 if (MaybeTrue & MaybeFalse) 8945 return indicatePessimisticFixpoint(); 8946 } 8947 } else if (RHSAA.undefIsContained()) { 8948 for (const APInt &L : LHSAAPVS) { 8949 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8950 MaybeTrue |= CmpResult; 8951 MaybeFalse |= !CmpResult; 8952 if (MaybeTrue & MaybeFalse) 8953 return indicatePessimisticFixpoint(); 8954 } 8955 } else { 8956 for (const APInt &L : LHSAAPVS) { 8957 for (const APInt &R : RHSAAPVS) { 8958 bool CmpResult = calculateICmpInst(ICI, L, R); 8959 MaybeTrue |= CmpResult; 8960 MaybeFalse |= !CmpResult; 8961 if (MaybeTrue & MaybeFalse) 8962 return indicatePessimisticFixpoint(); 8963 } 8964 } 8965 } 8966 if (MaybeTrue) 8967 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8968 if (MaybeFalse) 8969 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8970 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8971 : ChangeStatus::CHANGED; 8972 } 8973 8974 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8975 auto AssumedBefore = getAssumed(); 8976 Value *LHS = SI->getTrueValue(); 8977 Value *RHS = SI->getFalseValue(); 8978 8979 // Simplify the operands first. 8980 bool UsedAssumedInformation = false; 8981 const auto &SimplifiedLHS = 8982 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8983 *this, UsedAssumedInformation); 8984 if (!SimplifiedLHS.hasValue()) 8985 return ChangeStatus::UNCHANGED; 8986 if (!SimplifiedLHS.getValue()) 8987 return indicatePessimisticFixpoint(); 8988 LHS = *SimplifiedLHS; 8989 8990 const auto &SimplifiedRHS = 8991 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8992 *this, UsedAssumedInformation); 8993 if (!SimplifiedRHS.hasValue()) 8994 return ChangeStatus::UNCHANGED; 8995 if (!SimplifiedRHS.getValue()) 8996 return indicatePessimisticFixpoint(); 8997 RHS = *SimplifiedRHS; 8998 8999 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9000 return indicatePessimisticFixpoint(); 9001 9002 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 9003 UsedAssumedInformation); 9004 9005 // Check if we only need one operand. 9006 bool OnlyLeft = false, OnlyRight = false; 9007 if (C.hasValue() && *C && (*C)->isOneValue()) 9008 OnlyLeft = true; 9009 else if (C.hasValue() && *C && (*C)->isZeroValue()) 9010 OnlyRight = true; 9011 9012 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 9013 if (!OnlyRight) { 9014 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9015 DepClassTy::REQUIRED); 9016 if (!LHSAA->isValidState()) 9017 return indicatePessimisticFixpoint(); 9018 } 9019 if (!OnlyLeft) { 9020 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9021 DepClassTy::REQUIRED); 9022 if (!RHSAA->isValidState()) 9023 return indicatePessimisticFixpoint(); 9024 } 9025 9026 if (!LHSAA || !RHSAA) { 9027 // select (true/false), lhs, rhs 9028 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 9029 9030 if (OpAA->undefIsContained()) 9031 unionAssumedWithUndef(); 9032 else 9033 unionAssumed(*OpAA); 9034 9035 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 9036 // select i1 *, undef , undef => undef 9037 unionAssumedWithUndef(); 9038 } else { 9039 unionAssumed(*LHSAA); 9040 unionAssumed(*RHSAA); 9041 } 9042 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9043 : ChangeStatus::CHANGED; 9044 } 9045 9046 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 9047 auto AssumedBefore = getAssumed(); 9048 if (!CI->isIntegerCast()) 9049 return indicatePessimisticFixpoint(); 9050 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 9051 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 9052 Value *Src = CI->getOperand(0); 9053 9054 // Simplify the operand first. 9055 bool UsedAssumedInformation = false; 9056 const auto &SimplifiedSrc = 9057 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 9058 *this, UsedAssumedInformation); 9059 if (!SimplifiedSrc.hasValue()) 9060 return ChangeStatus::UNCHANGED; 9061 if (!SimplifiedSrc.getValue()) 9062 return indicatePessimisticFixpoint(); 9063 Src = *SimplifiedSrc; 9064 9065 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 9066 DepClassTy::REQUIRED); 9067 if (!SrcAA.isValidState()) 9068 return indicatePessimisticFixpoint(); 9069 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 9070 if (SrcAA.undefIsContained()) 9071 unionAssumedWithUndef(); 9072 else { 9073 for (const APInt &S : SrcAAPVS) { 9074 APInt T = calculateCastInst(CI, S, ResultBitWidth); 9075 unionAssumed(T); 9076 } 9077 } 9078 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9079 : ChangeStatus::CHANGED; 9080 } 9081 9082 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 9083 auto AssumedBefore = getAssumed(); 9084 Value *LHS = BinOp->getOperand(0); 9085 Value *RHS = BinOp->getOperand(1); 9086 9087 // Simplify the operands first. 9088 bool UsedAssumedInformation = false; 9089 const auto &SimplifiedLHS = 9090 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9091 *this, UsedAssumedInformation); 9092 if (!SimplifiedLHS.hasValue()) 9093 return ChangeStatus::UNCHANGED; 9094 if (!SimplifiedLHS.getValue()) 9095 return indicatePessimisticFixpoint(); 9096 LHS = *SimplifiedLHS; 9097 9098 const auto &SimplifiedRHS = 9099 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9100 *this, UsedAssumedInformation); 9101 if (!SimplifiedRHS.hasValue()) 9102 return ChangeStatus::UNCHANGED; 9103 if (!SimplifiedRHS.getValue()) 9104 return indicatePessimisticFixpoint(); 9105 RHS = *SimplifiedRHS; 9106 9107 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9108 return indicatePessimisticFixpoint(); 9109 9110 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9111 DepClassTy::REQUIRED); 9112 if (!LHSAA.isValidState()) 9113 return indicatePessimisticFixpoint(); 9114 9115 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9116 DepClassTy::REQUIRED); 9117 if (!RHSAA.isValidState()) 9118 return indicatePessimisticFixpoint(); 9119 9120 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9121 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9122 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9123 9124 // TODO: make use of undef flag to limit potential values aggressively. 
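    // Worked example with hypothetical sets: for `add` with LHS potential
    // values {1, 2} and RHS {8}, the loops below union {9, 10} into the
    // assumed set, subject to the -attributor-max-potential-values cutoff.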
9125 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9126 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9127 return indicatePessimisticFixpoint(); 9128 } else if (LHSAA.undefIsContained()) { 9129 for (const APInt &R : RHSAAPVS) { 9130 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9131 return indicatePessimisticFixpoint(); 9132 } 9133 } else if (RHSAA.undefIsContained()) { 9134 for (const APInt &L : LHSAAPVS) { 9135 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9136 return indicatePessimisticFixpoint(); 9137 } 9138 } else { 9139 for (const APInt &L : LHSAAPVS) { 9140 for (const APInt &R : RHSAAPVS) { 9141 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9142 return indicatePessimisticFixpoint(); 9143 } 9144 } 9145 } 9146 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9147 : ChangeStatus::CHANGED; 9148 } 9149 9150 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9151 auto AssumedBefore = getAssumed(); 9152 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9153 Value *IncomingValue = PHI->getIncomingValue(u); 9154 9155 // Simplify the operand first. 9156 bool UsedAssumedInformation = false; 9157 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9158 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9159 UsedAssumedInformation); 9160 if (!SimplifiedIncomingValue.hasValue()) 9161 continue; 9162 if (!SimplifiedIncomingValue.getValue()) 9163 return indicatePessimisticFixpoint(); 9164 IncomingValue = *SimplifiedIncomingValue; 9165 9166 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9167 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9168 if (!PotentialValuesAA.isValidState()) 9169 return indicatePessimisticFixpoint(); 9170 if (PotentialValuesAA.undefIsContained()) 9171 unionAssumedWithUndef(); 9172 else 9173 unionAssumed(PotentialValuesAA.getAssumed()); 9174 } 9175 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9176 : ChangeStatus::CHANGED; 9177 } 9178 9179 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9180 if (!L.getType()->isIntegerTy()) 9181 return indicatePessimisticFixpoint(); 9182 9183 auto Union = [&](Value &V) { 9184 if (isa<UndefValue>(V)) { 9185 unionAssumedWithUndef(); 9186 return true; 9187 } 9188 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9189 unionAssumed(CI->getValue()); 9190 return true; 9191 } 9192 return false; 9193 }; 9194 auto AssumedBefore = getAssumed(); 9195 9196 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9197 return indicatePessimisticFixpoint(); 9198 9199 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9200 : ChangeStatus::CHANGED; 9201 } 9202 9203 /// See AbstractAttribute::updateImpl(...). 
9204 ChangeStatus updateImpl(Attributor &A) override { 9205 Value &V = getAssociatedValue(); 9206 Instruction *I = dyn_cast<Instruction>(&V); 9207 9208 if (auto *ICI = dyn_cast<ICmpInst>(I)) 9209 return updateWithICmpInst(A, ICI); 9210 9211 if (auto *SI = dyn_cast<SelectInst>(I)) 9212 return updateWithSelectInst(A, SI); 9213 9214 if (auto *CI = dyn_cast<CastInst>(I)) 9215 return updateWithCastInst(A, CI); 9216 9217 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 9218 return updateWithBinaryOperator(A, BinOp); 9219 9220 if (auto *PHI = dyn_cast<PHINode>(I)) 9221 return updateWithPHINode(A, PHI); 9222 9223 if (auto *L = dyn_cast<LoadInst>(I)) 9224 return updateWithLoad(A, *L); 9225 9226 return indicatePessimisticFixpoint(); 9227 } 9228 9229 /// See AbstractAttribute::trackStatistics() 9230 void trackStatistics() const override { 9231 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 9232 } 9233 }; 9234 9235 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 9236 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 9237 : AAPotentialValuesImpl(IRP, A) {} 9238 9239 /// See AbstractAttribute::initialize(...). 9240 ChangeStatus updateImpl(Attributor &A) override { 9241 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 9242 "not be called"); 9243 } 9244 9245 /// See AbstractAttribute::trackStatistics() 9246 void trackStatistics() const override { 9247 STATS_DECLTRACK_FN_ATTR(potential_values) 9248 } 9249 }; 9250 9251 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 9252 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 9253 : AAPotentialValuesFunction(IRP, A) {} 9254 9255 /// See AbstractAttribute::trackStatistics() 9256 void trackStatistics() const override { 9257 STATS_DECLTRACK_CS_ATTR(potential_values) 9258 } 9259 }; 9260 9261 struct AAPotentialValuesCallSiteReturned 9262 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> { 9263 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 9264 : AACallSiteReturnedFromReturned<AAPotentialValues, 9265 AAPotentialValuesImpl>(IRP, A) {} 9266 9267 /// See AbstractAttribute::trackStatistics() 9268 void trackStatistics() const override { 9269 STATS_DECLTRACK_CSRET_ATTR(potential_values) 9270 } 9271 }; 9272 9273 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { 9274 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) 9275 : AAPotentialValuesFloating(IRP, A) {} 9276 9277 /// See AbstractAttribute::initialize(..). 9278 void initialize(Attributor &A) override { 9279 AAPotentialValuesImpl::initialize(A); 9280 if (isAtFixpoint()) 9281 return; 9282 9283 Value &V = getAssociatedValue(); 9284 9285 if (auto *C = dyn_cast<ConstantInt>(&V)) { 9286 unionAssumed(C->getValue()); 9287 indicateOptimisticFixpoint(); 9288 return; 9289 } 9290 9291 if (isa<UndefValue>(&V)) { 9292 unionAssumedWithUndef(); 9293 indicateOptimisticFixpoint(); 9294 return; 9295 } 9296 } 9297 9298 /// See AbstractAttribute::updateImpl(...). 9299 ChangeStatus updateImpl(Attributor &A) override { 9300 Value &V = getAssociatedValue(); 9301 auto AssumedBefore = getAssumed(); 9302 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V), 9303 DepClassTy::REQUIRED); 9304 const auto &S = AA.getAssumed(); 9305 unionAssumed(S); 9306 return AssumedBefore == getAssumed() ? 
ChangeStatus::UNCHANGED 9307 : ChangeStatus::CHANGED; 9308 } 9309 9310 /// See AbstractAttribute::trackStatistics() 9311 void trackStatistics() const override { 9312 STATS_DECLTRACK_CSARG_ATTR(potential_values) 9313 } 9314 }; 9315 9316 /// ------------------------ NoUndef Attribute --------------------------------- 9317 struct AANoUndefImpl : AANoUndef { 9318 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} 9319 9320 /// See AbstractAttribute::initialize(...). 9321 void initialize(Attributor &A) override { 9322 if (getIRPosition().hasAttr({Attribute::NoUndef})) { 9323 indicateOptimisticFixpoint(); 9324 return; 9325 } 9326 Value &V = getAssociatedValue(); 9327 if (isa<UndefValue>(V)) 9328 indicatePessimisticFixpoint(); 9329 else if (isa<FreezeInst>(V)) 9330 indicateOptimisticFixpoint(); 9331 else if (getPositionKind() != IRPosition::IRP_RETURNED && 9332 isGuaranteedNotToBeUndefOrPoison(&V)) 9333 indicateOptimisticFixpoint(); 9334 else 9335 AANoUndef::initialize(A); 9336 } 9337 9338 /// See followUsesInMBEC 9339 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 9340 AANoUndef::StateType &State) { 9341 const Value *UseV = U->get(); 9342 const DominatorTree *DT = nullptr; 9343 AssumptionCache *AC = nullptr; 9344 InformationCache &InfoCache = A.getInfoCache(); 9345 if (Function *F = getAnchorScope()) { 9346 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 9347 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 9348 } 9349 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); 9350 bool TrackUse = false; 9351 // Track use for instructions which must produce undef or poison bits when 9352 // at least one operand contains such bits. 9353 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) 9354 TrackUse = true; 9355 return TrackUse; 9356 } 9357 9358 /// See AbstractAttribute::getAsStr(). 9359 const std::string getAsStr() const override { 9360 return getAssumed() ? "noundef" : "may-undef-or-poison"; 9361 } 9362 9363 ChangeStatus manifest(Attributor &A) override { 9364 // We don't manifest noundef attribute for dead positions because the 9365 // associated values with dead positions would be replaced with undef 9366 // values. 9367 bool UsedAssumedInformation = false; 9368 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, 9369 UsedAssumedInformation)) 9370 return ChangeStatus::UNCHANGED; 9371 // A position whose simplified value does not have any value is 9372 // considered to be dead. We don't manifest noundef in such positions for 9373 // the same reason above. 9374 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation) 9375 .hasValue()) 9376 return ChangeStatus::UNCHANGED; 9377 return AANoUndef::manifest(A); 9378 } 9379 }; 9380 9381 struct AANoUndefFloating : public AANoUndefImpl { 9382 AANoUndefFloating(const IRPosition &IRP, Attributor &A) 9383 : AANoUndefImpl(IRP, A) {} 9384 9385 /// See AbstractAttribute::initialize(...). 9386 void initialize(Attributor &A) override { 9387 AANoUndefImpl::initialize(A); 9388 if (!getState().isAtFixpoint()) 9389 if (Instruction *CtxI = getCtxI()) 9390 followUsesInMBEC(*this, A, getState(), *CtxI); 9391 } 9392 9393 /// See AbstractAttribute::updateImpl(...). 
9394 ChangeStatus updateImpl(Attributor &A) override { 9395 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 9396 AANoUndef::StateType &T, bool Stripped) -> bool { 9397 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), 9398 DepClassTy::REQUIRED); 9399 if (!Stripped && this == &AA) { 9400 T.indicatePessimisticFixpoint(); 9401 } else { 9402 const AANoUndef::StateType &S = 9403 static_cast<const AANoUndef::StateType &>(AA.getState()); 9404 T ^= S; 9405 } 9406 return T.isValidState(); 9407 }; 9408 9409 StateType T; 9410 bool UsedAssumedInformation = false; 9411 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 9412 VisitValueCB, getCtxI(), 9413 UsedAssumedInformation)) 9414 return indicatePessimisticFixpoint(); 9415 9416 return clampStateAndIndicateChange(getState(), T); 9417 } 9418 9419 /// See AbstractAttribute::trackStatistics() 9420 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9421 }; 9422 9423 struct AANoUndefReturned final 9424 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { 9425 AANoUndefReturned(const IRPosition &IRP, Attributor &A) 9426 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} 9427 9428 /// See AbstractAttribute::trackStatistics() 9429 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9430 }; 9431 9432 struct AANoUndefArgument final 9433 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { 9434 AANoUndefArgument(const IRPosition &IRP, Attributor &A) 9435 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} 9436 9437 /// See AbstractAttribute::trackStatistics() 9438 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } 9439 }; 9440 9441 struct AANoUndefCallSiteArgument final : AANoUndefFloating { 9442 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) 9443 : AANoUndefFloating(IRP, A) {} 9444 9445 /// See AbstractAttribute::trackStatistics() 9446 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } 9447 }; 9448 9449 struct AANoUndefCallSiteReturned final 9450 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> { 9451 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) 9452 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {} 9453 9454 /// See AbstractAttribute::trackStatistics() 9455 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } 9456 }; 9457 9458 struct AACallEdgesImpl : public AACallEdges { 9459 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} 9460 9461 virtual const SetVector<Function *> &getOptimisticEdges() const override { 9462 return CalledFunctions; 9463 } 9464 9465 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; } 9466 9467 virtual bool hasNonAsmUnknownCallee() const override { 9468 return HasUnknownCalleeNonAsm; 9469 } 9470 9471 const std::string getAsStr() const override { 9472 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + 9473 std::to_string(CalledFunctions.size()) + "]"; 9474 } 9475 9476 void trackStatistics() const override {} 9477 9478 protected: 9479 void addCalledFunction(Function *Fn, ChangeStatus &Change) { 9480 if (CalledFunctions.insert(Fn)) { 9481 Change = ChangeStatus::CHANGED; 9482 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() 9483 << "\n"); 9484 } 9485 } 9486 9487 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { 9488 if (!HasUnknownCallee) 9489 Change = 
struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  virtual const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }

  virtual bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      bool UsedAssumedInformation = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       UsedAssumedInformation, false)) {
        // If we haven't gone through all values, assume that there are unknown
        // callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = cast<CallBase>(getCtxI());

    if (CB->isInlineAsm()) {
      setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: just process the called operand.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};
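// Illustrative sketch of the `!callees` case handled above (example IR, not
// from the original file): indirect calls can carry metadata that bounds the
// possible targets,
//
//   call void %fp(), !callees !0
//   ...
//   !0 = !{void ()* @f, void ()* @g}
//
// in which case {@f, @g} is recorded as the complete optimistic edge set and
// no unknown callee is assumed.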
struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(const Function &Fn) {
      Reachable.insert(&Fn);
      Unreachable.erase(&Fn);
    }

    /// If there is no information about the function, None is returned.
    Optional<bool> isCachedReachable(const Function &Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(&Fn))
        return true;

      if (Unreachable.count(&Fn))
        return false;

      return llvm::None;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<const Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<const Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we assume
    /// that we can reach any function.
    bool CanReachUnknownCallee = false;
  };
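  // The two sets above form a tri-state cache; a sketch of the lookup
  // semantics (illustrative commentary, mirrors isCachedReachable()):
  //
  //   Reachable.count(&Fn)   -> Some(true):  known to be reachable
  //   Unreachable.count(&Fn) -> Some(false): assumed unreachable, may flip
  //   neither                -> None:        query has never been issued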
  struct QueryResolver : public QuerySet {
    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (const Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(*Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList,
                     const Function &Fn) {
      Optional<bool> Cached = isCachedReachable(Fn);
      if (Cached.hasValue())
        return Cached.getValue();

      // The query was not cached, thus it is new. We need to request an update
      // explicitly to make sure the information is properly run to a
      // fixpoint.
      A.registerForUpdate(AA);

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(&Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          const Function &Fn) const {

      // Handle the most trivial case first.
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(const_cast<Function *>(&Fn)))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // Functions that do not call back into the module can be ignored.
          if (Edge->hasFnAttribute(Attribute::NoCallback))
            continue;

          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (auto *Dep : Deps)
        A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);

      return false;
    }
  };
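  // Note on the recursion guard in isReachable() above (illustrative
  // commentary, not in the original): inserting Fn into Unreachable *before*
  // running checkIfReachable() breaks query cycles. For a self-recursive
  // function, e.g.
  //
  //   void f() { f(); }
  //
  // the nested canReach query re-enters this resolver and hits the cached
  // "unreachable" entry instead of recursing forever; the entry is promoted
  // via markReachable() once the check succeeds.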
  /// Get call edges that can be reached by this instruction.
  bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
                             const Instruction &Inst,
                             SmallVector<const AACallEdges *> &Result) const {
    // Determine call-like instructions that we can reach from the instruction.
    auto CheckCallBase = [&](Instruction &CBInst) {
      if (!Reachability.isAssumedReachable(A, Inst, CBInst))
        return true;

      auto &CB = cast<CallBase>(CBInst);
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

      Result.push_back(&AAEdges);
      return true;
    };

    bool UsedAssumedInformation = false;
    return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                             UsedAssumedInformation,
                                             /* CheckBBLivenessOnly */ true);
  }

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
                                                          {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB,
                const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);

    return Result;
  }

  bool instructionCanReach(Attributor &A, const Instruction &Inst,
                           const Function &Fn,
                           bool UseBackwards) const override {
    if (!isValidState())
      return true;

    if (UseBackwards)
      return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);

    const auto &Reachability = A.getAAFor<AAReachability>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);

    SmallVector<const AACallEdges *> CallEdges;
    bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
    if (!AllKnown)
      InstQSet.CanReachUnknownCallee = true;

    return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
  }
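  // Summary of the two query modes above (illustrative commentary, not in the
  // original): with UseBackwards the question is delegated to
  // AA::isPotentiallyReachable; otherwise only call sites that are
  // forward-reachable from Inst according to AAReachability contribute their
  // call edges to the per-instruction QueryResolver, and an incomplete scan
  // conservatively sets CanReachUnknownCallee.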
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    // Update the instruction queries.
    if (!InstQueries.empty()) {
      const AAReachability *Reachability = &A.getAAFor<AAReachability>(
          *this, IRPosition::function(*getAssociatedFunction()),
          DepClassTy::REQUIRED);

      // Check for local call bases first.
      for (auto &InstPair : InstQueries) {
        SmallVector<const AACallEdges *> CallEdges;
        bool AllKnown =
            getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
        // The update will return a change if this affects any queries.
        if (!AllKnown)
          InstPair.second.CanReachUnknownCallee = true;
        Change |= InstPair.second.update(A, *this, CallEdges);
      }
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QueryResolver WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  MapVector<const CallBase *, QueryResolver> CBQueries;

  /// This is for instruction queries that scan "forward".
  MapVector<const Instruction *, QueryResolver> InstQueries;
};
} // namespace

/// ---------------------- Assumption Propagation ------------------------------
namespace {
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};
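// For illustration (assumed example, not part of the original file): the
// string assumptions tracked here are carried by the "llvm.assume" function
// attribute, e.g.
//
//   define void @f() #0 { ... }
//   attributes #0 = { "llvm.assume"="omp_no_openmp,check_all" }
//
// where "check_all" is a made-up assumption name. getAssumptions() extracts
// such sets and the lattice below intersects them across the call graph.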
/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool UsedAssumedInformation = false;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either an
    // entry into the call graph or an empty node. This node is known to only
    // contain its own assumptions and can be propagated to its successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};
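// Sketch of the narrowing step above (illustrative): if @f is called from two
// call sites whose assumed sets are {A, B} and {B, C}, the intersection
// computed by CallSitePred leaves {B} as the assumed set of @f's function
// position.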
/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};
} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
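// For reference, a manually expanded instance of the macros above
// (illustrative; this mirrors what the preprocessor emits for
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... the other invalid kinds expand the same way ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }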
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV