1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // See the Attributor.h file comment and the class descriptions in that file for 10 // more information. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/ADT/SetVector.h" 15 #include "llvm/Transforms/IPO/Attributor.h" 16 17 #include "llvm/ADT/APInt.h" 18 #include "llvm/ADT/MapVector.h" 19 #include "llvm/ADT/SCCIterator.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetOperations.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/Analysis/AssumeBundleQueries.h" 26 #include "llvm/Analysis/AssumptionCache.h" 27 #include "llvm/Analysis/CaptureTracking.h" 28 #include "llvm/Analysis/InstructionSimplify.h" 29 #include "llvm/Analysis/LazyValueInfo.h" 30 #include "llvm/Analysis/MemoryBuiltins.h" 31 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 32 #include "llvm/Analysis/ScalarEvolution.h" 33 #include "llvm/Analysis/TargetTransformInfo.h" 34 #include "llvm/Analysis/ValueTracking.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Assumptions.h" 37 #include "llvm/IR/Constants.h" 38 #include "llvm/IR/DataLayout.h" 39 #include "llvm/IR/IRBuilder.h" 40 #include "llvm/IR/Instruction.h" 41 #include "llvm/IR/Instructions.h" 42 #include "llvm/IR/IntrinsicInst.h" 43 #include "llvm/IR/NoFolder.h" 44 #include "llvm/IR/Value.h" 45 #include "llvm/Support/Alignment.h" 46 #include "llvm/Support/Casting.h" 47 #include "llvm/Support/CommandLine.h" 48 #include "llvm/Support/ErrorHandling.h" 49 #include "llvm/Support/GraphWriter.h" 50 #include "llvm/Support/MathExtras.h" 51 #include "llvm/Support/raw_ostream.h" 52 #include "llvm/Transforms/IPO/ArgumentPromotion.h" 53 #include "llvm/Transforms/Utils/Cloning.h" 54 #include "llvm/Transforms/Utils/Local.h" 55 #include <cassert> 56 57 using namespace llvm; 58 59 #define DEBUG_TYPE "attributor" 60 61 static cl::opt<bool> ManifestInternal( 62 "attributor-manifest-internal", cl::Hidden, 63 cl::desc("Manifest Attributor internal string attributes."), 64 cl::init(false)); 65 66 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128), 67 cl::Hidden); 68 69 template <> 70 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0; 71 72 static cl::opt<unsigned, true> MaxPotentialValues( 73 "attributor-max-potential-values", cl::Hidden, 74 cl::desc("Maximum number of potential values to be " 75 "tracked for each position."), 76 cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues), 77 cl::init(7)); 78 79 static cl::opt<unsigned> MaxInterferingAccesses( 80 "attributor-max-interfering-accesses", cl::Hidden, 81 cl::desc("Maximum number of interfering accesses to " 82 "check before assuming all might interfere."), 83 cl::init(6)); 84 85 STATISTIC(NumAAs, "Number of abstract attributes created"); 86 87 // Some helper macros to deal with statistics tracking. 
88 // 89 // Usage: 90 // For simple IR attribute tracking overload trackStatistics in the abstract 91 // attribute and choose the right STATS_DECLTRACK_********* macro, 92 // e.g.,: 93 // void trackStatistics() const override { 94 // STATS_DECLTRACK_ARG_ATTR(returned) 95 // } 96 // If there is a single "increment" side one can use the macro 97 // STATS_DECLTRACK with a custom message. If there are multiple increment 98 // sides, STATS_DECL and STATS_TRACK can also be used separately. 99 // 100 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \ 101 ("Number of " #TYPE " marked '" #NAME "'") 102 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME 103 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG); 104 #define STATS_DECL(NAME, TYPE, MSG) \ 105 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG); 106 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE)); 107 #define STATS_DECLTRACK(NAME, TYPE, MSG) \ 108 { \ 109 STATS_DECL(NAME, TYPE, MSG) \ 110 STATS_TRACK(NAME, TYPE) \ 111 } 112 #define STATS_DECLTRACK_ARG_ATTR(NAME) \ 113 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME)) 114 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \ 115 STATS_DECLTRACK(NAME, CSArguments, \ 116 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME)) 117 #define STATS_DECLTRACK_FN_ATTR(NAME) \ 118 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME)) 119 #define STATS_DECLTRACK_CS_ATTR(NAME) \ 120 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME)) 121 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \ 122 STATS_DECLTRACK(NAME, FunctionReturn, \ 123 BUILD_STAT_MSG_IR_ATTR(function returns, NAME)) 124 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \ 125 STATS_DECLTRACK(NAME, CSReturn, \ 126 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME)) 127 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \ 128 STATS_DECLTRACK(NAME, Floating, \ 129 ("Number of floating values known to be '" #NAME "'")) 130 131 // Specialization of the operator<< for abstract attributes subclasses. This 132 // disambiguates situations where multiple operators are applicable. 
133 namespace llvm { 134 #define PIPE_OPERATOR(CLASS) \ 135 raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) { \ 136 return OS << static_cast<const AbstractAttribute &>(AA); \ 137 } 138 139 PIPE_OPERATOR(AAIsDead) 140 PIPE_OPERATOR(AANoUnwind) 141 PIPE_OPERATOR(AANoSync) 142 PIPE_OPERATOR(AANoRecurse) 143 PIPE_OPERATOR(AAWillReturn) 144 PIPE_OPERATOR(AANoReturn) 145 PIPE_OPERATOR(AAReturnedValues) 146 PIPE_OPERATOR(AANonNull) 147 PIPE_OPERATOR(AANoAlias) 148 PIPE_OPERATOR(AADereferenceable) 149 PIPE_OPERATOR(AAAlign) 150 PIPE_OPERATOR(AANoCapture) 151 PIPE_OPERATOR(AAValueSimplify) 152 PIPE_OPERATOR(AANoFree) 153 PIPE_OPERATOR(AAHeapToStack) 154 PIPE_OPERATOR(AAReachability) 155 PIPE_OPERATOR(AAMemoryBehavior) 156 PIPE_OPERATOR(AAMemoryLocation) 157 PIPE_OPERATOR(AAValueConstantRange) 158 PIPE_OPERATOR(AAPrivatizablePtr) 159 PIPE_OPERATOR(AAUndefinedBehavior) 160 PIPE_OPERATOR(AAPotentialValues) 161 PIPE_OPERATOR(AANoUndef) 162 PIPE_OPERATOR(AACallEdges) 163 PIPE_OPERATOR(AAFunctionReachability) 164 PIPE_OPERATOR(AAPointerInfo) 165 PIPE_OPERATOR(AAAssumptionInfo) 166 167 #undef PIPE_OPERATOR 168 169 template <> 170 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S, 171 const DerefState &R) { 172 ChangeStatus CS0 = 173 clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState); 174 ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState); 175 return CS0 | CS1; 176 } 177 178 } // namespace llvm 179 180 /// Get pointer operand of memory accessing instruction. If \p I is 181 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile, 182 /// is set to false and the instruction is volatile, return nullptr. 183 static const Value *getPointerOperand(const Instruction *I, 184 bool AllowVolatile) { 185 if (!AllowVolatile && I->isVolatile()) 186 return nullptr; 187 188 if (auto *LI = dyn_cast<LoadInst>(I)) { 189 return LI->getPointerOperand(); 190 } 191 192 if (auto *SI = dyn_cast<StoreInst>(I)) { 193 return SI->getPointerOperand(); 194 } 195 196 if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) { 197 return CXI->getPointerOperand(); 198 } 199 200 if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) { 201 return RMWI->getPointerOperand(); 202 } 203 204 return nullptr; 205 } 206 207 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and 208 /// advanced by \p Offset bytes. To aid later analysis the method tries to build 209 /// getelement pointer instructions that traverse the natural type of \p Ptr if 210 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence 211 /// through a cast to i8*. 212 /// 213 /// TODO: This could probably live somewhere more prominantly if it doesn't 214 /// already exist. 215 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr, 216 int64_t Offset, IRBuilder<NoFolder> &IRB, 217 const DataLayout &DL) { 218 assert(Offset >= 0 && "Negative offset not supported yet!"); 219 LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset 220 << "-bytes as " << *ResTy << "\n"); 221 222 if (Offset) { 223 Type *Ty = PtrElemTy; 224 APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset); 225 SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset); 226 227 SmallVector<Value *, 4> ValIndices; 228 std::string GEPName = Ptr->getName().str(); 229 for (const APInt &Index : IntIndices) { 230 ValIndices.push_back(IRB.getInt(Index)); 231 GEPName += "." 
+ std::to_string(Index.getZExtValue()); 232 } 233 234 // Create a GEP for the indices collected above. 235 Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName); 236 237 // If an offset is left we use byte-wise adjustment. 238 if (IntOffset != 0) { 239 Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy()); 240 Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset), 241 GEPName + ".b" + Twine(IntOffset.getZExtValue())); 242 } 243 } 244 245 // Ensure the result has the requested type. 246 Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy, 247 Ptr->getName() + ".cast"); 248 249 LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n"); 250 return Ptr; 251 } 252 253 /// Recursively visit all values that might become \p IRP at some point. This 254 /// will be done by looking through cast instructions, selects, phis, and calls 255 /// with the "returned" attribute. Once we cannot look through the value any 256 /// further, the callback \p VisitValueCB is invoked and passed the current 257 /// value, the \p State, and a flag to indicate if we stripped anything. 258 /// Stripped means that we unpacked the value associated with \p IRP at least 259 /// once. Note that the value used for the callback may still be the value 260 /// associated with \p IRP (due to PHIs). To limit how much effort is invested, 261 /// we will never visit more values than specified by \p MaxValues. 262 /// If \p Intraprocedural is set to true only values valid in the scope of 263 /// \p CtxI will be visited and simplification into other scopes is prevented. 264 template <typename StateTy> 265 static bool genericValueTraversal( 266 Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA, 267 StateTy &State, 268 function_ref<bool(Value &, const Instruction *, StateTy &, bool)> 269 VisitValueCB, 270 const Instruction *CtxI, bool &UsedAssumedInformation, 271 bool UseValueSimplify = true, int MaxValues = 16, 272 function_ref<Value *(Value *)> StripCB = nullptr, 273 bool Intraprocedural = false) { 274 275 struct LivenessInfo { 276 const AAIsDead *LivenessAA = nullptr; 277 bool AnyDead = false; 278 }; 279 SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs; 280 auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & { 281 LivenessInfo &LI = LivenessAAs[&F]; 282 if (!LI.LivenessAA) 283 LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA, IRPosition::function(F), 284 DepClassTy::NONE); 285 return LI; 286 }; 287 288 Value *InitialV = &IRP.getAssociatedValue(); 289 using Item = std::pair<Value *, const Instruction *>; 290 SmallSet<Item, 16> Visited; 291 SmallVector<Item, 16> Worklist; 292 Worklist.push_back({InitialV, CtxI}); 293 294 int Iteration = 0; 295 do { 296 Item I = Worklist.pop_back_val(); 297 Value *V = I.first; 298 CtxI = I.second; 299 if (StripCB) 300 V = StripCB(V); 301 302 // Check if we should process the current value. To prevent endless 303 // recursion keep a record of the values we followed! 304 if (!Visited.insert(I).second) 305 continue; 306 307 // Make sure we limit the compile time for complex expressions. 308 if (Iteration++ >= MaxValues) { 309 LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: " 310 << Iteration << "!\n"); 311 return false; 312 } 313 314 // Explicitly look through calls with a "returned" attribute if we do 315 // not have a pointer as stripPointerCasts only works on them. 
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site operand,
                  // stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if (!Intraprocedural || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      bool UsedAssumedInformation = false;
      // If we ask for the potentially loaded values from the initial pointer we
      // will simply end up here again. The load is as far as we can make it.
      if (LI->getPointerOperand() != InitialV) {
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies, QueryingAA,
                                           UsedAssumedInformation,
                                           /* OnlyExact */ true)) {
          // Values have to be dynamically unique or we lose the fact that a
          // single llvm::Value might represent two runtime values (e.g., stack
          // locations in different recursive calls).
418 bool DynamicallyUnique = 419 llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) { 420 return AA::isDynamicallyUnique(A, QueryingAA, *PC); 421 }); 422 if (DynamicallyUnique && 423 (!Intraprocedural || !CtxI || 424 llvm::all_of(PotentialCopies, [CtxI](Value *PC) { 425 return AA::isValidInScope(*PC, CtxI->getFunction()); 426 }))) { 427 for (auto *PotentialCopy : PotentialCopies) 428 Worklist.push_back({PotentialCopy, CtxI}); 429 continue; 430 } 431 } 432 } 433 } 434 435 // Once a leaf is reached we inform the user through the callback. 436 if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) { 437 LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: " 438 << *V << "!\n"); 439 return false; 440 } 441 } while (!Worklist.empty()); 442 443 // If we actually used liveness information so we have to record a dependence. 444 for (auto &It : LivenessAAs) 445 if (It.second.AnyDead) 446 A.recordDependence(*It.second.LivenessAA, QueryingAA, 447 DepClassTy::OPTIONAL); 448 449 // All values have been visited. 450 return true; 451 } 452 453 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr, 454 SmallVectorImpl<Value *> &Objects, 455 const AbstractAttribute &QueryingAA, 456 const Instruction *CtxI, 457 bool &UsedAssumedInformation, 458 bool Intraprocedural) { 459 auto StripCB = [&](Value *V) { return getUnderlyingObject(V); }; 460 SmallPtrSet<Value *, 8> SeenObjects; 461 auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *, 462 SmallVectorImpl<Value *> &Objects, 463 bool) -> bool { 464 if (SeenObjects.insert(&Val).second) 465 Objects.push_back(&Val); 466 return true; 467 }; 468 if (!genericValueTraversal<decltype(Objects)>( 469 A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI, 470 UsedAssumedInformation, true, 32, StripCB, Intraprocedural)) 471 return false; 472 return true; 473 } 474 475 static const Value * 476 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA, 477 const Value *Val, const DataLayout &DL, APInt &Offset, 478 bool GetMinOffset, bool AllowNonInbounds, 479 bool UseAssumed = false) { 480 481 auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool { 482 const IRPosition &Pos = IRPosition::value(V); 483 // Only track dependence if we are going to use the assumed info. 484 const AAValueConstantRange &ValueConstantRangeAA = 485 A.getAAFor<AAValueConstantRange>(QueryingAA, Pos, 486 UseAssumed ? DepClassTy::OPTIONAL 487 : DepClassTy::NONE); 488 ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed() 489 : ValueConstantRangeAA.getKnown(); 490 if (Range.isFullSet()) 491 return false; 492 493 // We can only use the lower part of the range because the upper part can 494 // be higher than what the value can really be. 
495 if (GetMinOffset) 496 ROffset = Range.getSignedMin(); 497 else 498 ROffset = Range.getSignedMax(); 499 return true; 500 }; 501 502 return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds, 503 /* AllowInvariant */ true, 504 AttributorAnalysis); 505 } 506 507 static const Value * 508 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA, 509 const Value *Ptr, int64_t &BytesOffset, 510 const DataLayout &DL, bool AllowNonInbounds = false) { 511 APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); 512 const Value *Base = 513 stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt, 514 /* GetMinOffset */ true, AllowNonInbounds); 515 516 BytesOffset = OffsetAPInt.getSExtValue(); 517 return Base; 518 } 519 520 /// Clamp the information known for all returned values of a function 521 /// (identified by \p QueryingAA) into \p S. 522 template <typename AAType, typename StateType = typename AAType::StateType> 523 static void clampReturnedValueStates( 524 Attributor &A, const AAType &QueryingAA, StateType &S, 525 const IRPosition::CallBaseContext *CBContext = nullptr) { 526 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for " 527 << QueryingAA << " into " << S << "\n"); 528 529 assert((QueryingAA.getIRPosition().getPositionKind() == 530 IRPosition::IRP_RETURNED || 531 QueryingAA.getIRPosition().getPositionKind() == 532 IRPosition::IRP_CALL_SITE_RETURNED) && 533 "Can only clamp returned value states for a function returned or call " 534 "site returned position!"); 535 536 // Use an optional state as there might not be any return values and we want 537 // to join (IntegerState::operator&) the state of all there are. 538 Optional<StateType> T; 539 540 // Callback for each possibly returned value. 541 auto CheckReturnValue = [&](Value &RV) -> bool { 542 const IRPosition &RVPos = IRPosition::value(RV, CBContext); 543 const AAType &AA = 544 A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED); 545 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr() 546 << " @ " << RVPos << "\n"); 547 const StateType &AAS = AA.getState(); 548 if (T.hasValue()) 549 *T &= AAS; 550 else 551 T = AAS; 552 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T 553 << "\n"); 554 return T->isValidState(); 555 }; 556 557 if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA)) 558 S.indicatePessimisticFixpoint(); 559 else if (T.hasValue()) 560 S ^= *T; 561 } 562 563 namespace { 564 /// Helper class for generic deduction: return value -> returned position. 565 template <typename AAType, typename BaseType, 566 typename StateType = typename BaseType::StateType, 567 bool PropagateCallBaseContext = false> 568 struct AAReturnedFromReturnedValues : public BaseType { 569 AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A) 570 : BaseType(IRP, A) {} 571 572 /// See AbstractAttribute::updateImpl(...). 573 ChangeStatus updateImpl(Attributor &A) override { 574 StateType S(StateType::getBestState(this->getState())); 575 clampReturnedValueStates<AAType, StateType>( 576 A, *this, S, 577 PropagateCallBaseContext ? this->getCallBaseContext() : nullptr); 578 // TODO: If we know we visited all returned values, thus no are assumed 579 // dead, we can take the known information from the state T. 580 return clampStateAndIndicateChange<StateType>(this->getState(), S); 581 } 582 }; 583 584 /// Clamp the information known at all call sites for a given argument 585 /// (identified by \p QueryingAA) into \p S. 
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and we
  // want to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument"
                    << "Position:" << Pos << "CB Arg state:" << CBArgumentState
                    << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
672 ChangeStatus updateImpl(Attributor &A) override { 673 StateType S = StateType::getBestState(this->getState()); 674 675 if (BridgeCallBaseContext) { 676 bool Success = 677 getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>( 678 A, *this, this->getIRPosition(), S); 679 if (Success) 680 return clampStateAndIndicateChange<StateType>(this->getState(), S); 681 } 682 clampCallSiteArgumentStates<AAType, StateType>(A, *this, S); 683 684 // TODO: If we know we visited all incoming values, thus no are assumed 685 // dead, we can take the known information from the state T. 686 return clampStateAndIndicateChange<StateType>(this->getState(), S); 687 } 688 }; 689 690 /// Helper class for generic replication: function returned -> cs returned. 691 template <typename AAType, typename BaseType, 692 typename StateType = typename BaseType::StateType, 693 bool IntroduceCallBaseContext = false> 694 struct AACallSiteReturnedFromReturned : public BaseType { 695 AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A) 696 : BaseType(IRP, A) {} 697 698 /// See AbstractAttribute::updateImpl(...). 699 ChangeStatus updateImpl(Attributor &A) override { 700 assert(this->getIRPosition().getPositionKind() == 701 IRPosition::IRP_CALL_SITE_RETURNED && 702 "Can only wrap function returned positions for call site returned " 703 "positions!"); 704 auto &S = this->getState(); 705 706 const Function *AssociatedFunction = 707 this->getIRPosition().getAssociatedFunction(); 708 if (!AssociatedFunction) 709 return S.indicatePessimisticFixpoint(); 710 711 CallBase &CBContext = cast<CallBase>(this->getAnchorValue()); 712 if (IntroduceCallBaseContext) 713 LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:" 714 << CBContext << "\n"); 715 716 IRPosition FnPos = IRPosition::returned( 717 *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr); 718 const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED); 719 return clampStateAndIndicateChange(S, AA.getState()); 720 } 721 }; 722 723 /// Helper function to accumulate uses. 724 template <class AAType, typename StateType = typename AAType::StateType> 725 static void followUsesInContext(AAType &AA, Attributor &A, 726 MustBeExecutedContextExplorer &Explorer, 727 const Instruction *CtxI, 728 SetVector<const Use *> &Uses, 729 StateType &State) { 730 auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI); 731 for (unsigned u = 0; u < Uses.size(); ++u) { 732 const Use *U = Uses[u]; 733 if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) { 734 bool Found = Explorer.findInContextOf(UserI, EIt, EEnd); 735 if (Found && AA.followUseInMBEC(A, U, UserI, State)) 736 for (const Use &Us : UserI->uses()) 737 Uses.insert(&Us); 738 } 739 } 740 } 741 742 /// Use the must-be-executed-context around \p I to add information into \p S. 743 /// The AAType class is required to have `followUseInMBEC` method with the 744 /// following signature and behaviour: 745 /// 746 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I) 747 /// U - Underlying use. 748 /// I - The user of the \p U. 749 /// Returns true if the value should be tracked transitively. 750 /// 751 template <class AAType, typename StateType = typename AAType::StateType> 752 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S, 753 Instruction &CtxI) { 754 755 // Container for (transitive) uses of the associated value. 
756 SetVector<const Use *> Uses; 757 for (const Use &U : AA.getIRPosition().getAssociatedValue().uses()) 758 Uses.insert(&U); 759 760 MustBeExecutedContextExplorer &Explorer = 761 A.getInfoCache().getMustBeExecutedContextExplorer(); 762 763 followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S); 764 765 if (S.isAtFixpoint()) 766 return; 767 768 SmallVector<const BranchInst *, 4> BrInsts; 769 auto Pred = [&](const Instruction *I) { 770 if (const BranchInst *Br = dyn_cast<BranchInst>(I)) 771 if (Br->isConditional()) 772 BrInsts.push_back(Br); 773 return true; 774 }; 775 776 // Here, accumulate conditional branch instructions in the context. We 777 // explore the child paths and collect the known states. The disjunction of 778 // those states can be merged to its own state. Let ParentState_i be a state 779 // to indicate the known information for an i-th branch instruction in the 780 // context. ChildStates are created for its successors respectively. 781 // 782 // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1} 783 // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2} 784 // ... 785 // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m} 786 // 787 // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m 788 // 789 // FIXME: Currently, recursive branches are not handled. For example, we 790 // can't deduce that ptr must be dereferenced in below function. 791 // 792 // void f(int a, int c, int *ptr) { 793 // if(a) 794 // if (b) { 795 // *ptr = 0; 796 // } else { 797 // *ptr = 1; 798 // } 799 // else { 800 // if (b) { 801 // *ptr = 0; 802 // } else { 803 // *ptr = 1; 804 // } 805 // } 806 // } 807 808 Explorer.checkForAllContext(&CtxI, Pred); 809 for (const BranchInst *Br : BrInsts) { 810 StateType ParentState; 811 812 // The known state of the parent state is a conjunction of children's 813 // known states so it is initialized with a best state. 814 ParentState.indicateOptimisticFixpoint(); 815 816 for (const BasicBlock *BB : Br->successors()) { 817 StateType ChildState; 818 819 size_t BeforeSize = Uses.size(); 820 followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState); 821 822 // Erase uses which only appear in the child. 823 for (auto It = Uses.begin() + BeforeSize; It != Uses.end();) 824 It = Uses.erase(It); 825 826 ParentState &= ChildState; 827 } 828 829 // Use only known state. 830 S += ParentState; 831 } 832 } 833 } // namespace 834 835 /// ------------------------ PointerInfo --------------------------------------- 836 837 namespace llvm { 838 namespace AA { 839 namespace PointerInfo { 840 841 struct State; 842 843 } // namespace PointerInfo 844 } // namespace AA 845 846 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage. 847 template <> 848 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> { 849 using Access = AAPointerInfo::Access; 850 static inline Access getEmptyKey(); 851 static inline Access getTombstoneKey(); 852 static unsigned getHashValue(const Access &A); 853 static bool isEqual(const Access &LHS, const Access &RHS); 854 }; 855 856 /// Helper that allows OffsetAndSize as a key in a DenseMap. 
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
1050 AAPointerInfo::OffsetAndSize OAS(-1, -1); 1051 for (auto &It : AccessBins) { 1052 for (auto &Access : *It.getSecond()) { 1053 if (Access.getRemoteInst() == &I) { 1054 OAS = It.getFirst(); 1055 break; 1056 } 1057 } 1058 if (OAS.getSize() != -1) 1059 break; 1060 } 1061 // No access for I was found, we are done. 1062 if (OAS.getSize() == -1) 1063 return true; 1064 1065 // Now that we have an offset and size, find all overlapping ones and use 1066 // the callback on the accesses. 1067 return forallInterferingAccesses(OAS, CB); 1068 } 1069 1070 private: 1071 /// State to track fixpoint and validity. 1072 BooleanState BS; 1073 }; 1074 1075 namespace { 1076 struct AAPointerInfoImpl 1077 : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> { 1078 using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>; 1079 AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {} 1080 1081 /// See AbstractAttribute::initialize(...). 1082 void initialize(Attributor &A) override { AAPointerInfo::initialize(A); } 1083 1084 /// See AbstractAttribute::getAsStr(). 1085 const std::string getAsStr() const override { 1086 return std::string("PointerInfo ") + 1087 (isValidState() ? (std::string("#") + 1088 std::to_string(AccessBins.size()) + " bins") 1089 : "<invalid>"); 1090 } 1091 1092 /// See AbstractAttribute::manifest(...). 1093 ChangeStatus manifest(Attributor &A) override { 1094 return AAPointerInfo::manifest(A); 1095 } 1096 1097 bool forallInterferingAccesses( 1098 OffsetAndSize OAS, 1099 function_ref<bool(const AAPointerInfo::Access &, bool)> CB) 1100 const override { 1101 return State::forallInterferingAccesses(OAS, CB); 1102 } 1103 bool forallInterferingAccesses( 1104 Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I, 1105 function_ref<bool(const Access &, bool)> UserCB) const override { 1106 SmallPtrSet<const Access *, 8> DominatingWrites; 1107 SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses; 1108 1109 Function &Scope = *I.getFunction(); 1110 const auto &NoSyncAA = A.getAAFor<AANoSync>( 1111 QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL); 1112 const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>( 1113 IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL); 1114 const bool NoSync = NoSyncAA.isAssumedNoSync(); 1115 1116 // Helper to determine if we need to consider threading, which we cannot 1117 // right now. However, if the function is (assumed) nosync or the thread 1118 // executing all instructions is the main thread only we can ignore 1119 // threading. 1120 auto CanIgnoreThreading = [&](const Instruction &I) -> bool { 1121 if (NoSync) 1122 return true; 1123 if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I)) 1124 return true; 1125 return false; 1126 }; 1127 1128 // Helper to determine if the access is executed by the same thread as the 1129 // load, for now it is sufficient to avoid any potential threading effects 1130 // as we cannot deal with them anyway. 1131 auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool { 1132 return CanIgnoreThreading(*Acc.getLocalInst()); 1133 }; 1134 1135 // TODO: Use inter-procedural reachability and dominance. 
1136 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 1137 QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL); 1138 1139 const bool FindInterferingWrites = I.mayReadFromMemory(); 1140 const bool FindInterferingReads = I.mayWriteToMemory(); 1141 const bool UseDominanceReasoning = FindInterferingWrites; 1142 const bool CanUseCFGResoning = CanIgnoreThreading(I); 1143 InformationCache &InfoCache = A.getInfoCache(); 1144 const DominatorTree *DT = 1145 NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning 1146 ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>( 1147 Scope) 1148 : nullptr; 1149 1150 enum GPUAddressSpace : unsigned { 1151 Generic = 0, 1152 Global = 1, 1153 Shared = 3, 1154 Constant = 4, 1155 Local = 5, 1156 }; 1157 1158 // Helper to check if a value has "kernel lifetime", that is it will not 1159 // outlive a GPU kernel. This is true for shared, constant, and local 1160 // globals on AMD and NVIDIA GPUs. 1161 auto HasKernelLifetime = [&](Value *V, Module &M) { 1162 Triple T(M.getTargetTriple()); 1163 if (!(T.isAMDGPU() || T.isNVPTX())) 1164 return false; 1165 switch (V->getType()->getPointerAddressSpace()) { 1166 case GPUAddressSpace::Shared: 1167 case GPUAddressSpace::Constant: 1168 case GPUAddressSpace::Local: 1169 return true; 1170 default: 1171 return false; 1172 }; 1173 }; 1174 1175 // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query 1176 // to determine if we should look at reachability from the callee. For 1177 // certain pointers we know the lifetime and we do not have to step into the 1178 // callee to determine reachability as the pointer would be dead in the 1179 // callee. See the conditional initialization below. 1180 std::function<bool(const Function &)> IsLiveInCalleeCB; 1181 1182 if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) { 1183 // If the alloca containing function is not recursive the alloca 1184 // must be dead in the callee. 1185 const Function *AIFn = AI->getFunction(); 1186 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 1187 *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL); 1188 if (NoRecurseAA.isAssumedNoRecurse()) { 1189 IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; }; 1190 } 1191 } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) { 1192 // If the global has kernel lifetime we can stop if we reach a kernel 1193 // as it is "dead" in the (unknown) callees. 1194 if (HasKernelLifetime(GV, *GV->getParent())) 1195 IsLiveInCalleeCB = [](const Function &Fn) { 1196 return !Fn.hasFnAttribute("kernel"); 1197 }; 1198 } 1199 1200 auto AccessCB = [&](const Access &Acc, bool Exact) { 1201 if ((!FindInterferingWrites || !Acc.isWrite()) && 1202 (!FindInterferingReads || !Acc.isRead())) 1203 return true; 1204 1205 // For now we only filter accesses based on CFG reasoning which does not 1206 // work yet if we have threading effects, or the access is complicated. 
1207 if (CanUseCFGResoning) { 1208 if ((!Acc.isWrite() || 1209 !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA, 1210 IsLiveInCalleeCB)) && 1211 (!Acc.isRead() || 1212 !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA, 1213 IsLiveInCalleeCB))) 1214 return true; 1215 if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) && 1216 IsSameThreadAsLoad(Acc)) { 1217 if (DT->dominates(Acc.getLocalInst(), &I)) 1218 DominatingWrites.insert(&Acc); 1219 } 1220 } 1221 1222 InterferingAccesses.push_back({&Acc, Exact}); 1223 return true; 1224 }; 1225 if (!State::forallInterferingAccesses(I, AccessCB)) 1226 return false; 1227 1228 // If we cannot use CFG reasoning we only filter the non-write accesses 1229 // and are done here. 1230 if (!CanUseCFGResoning) { 1231 for (auto &It : InterferingAccesses) 1232 if (!UserCB(*It.first, It.second)) 1233 return false; 1234 return true; 1235 } 1236 1237 // Helper to determine if we can skip a specific write access. This is in 1238 // the worst case quadratic as we are looking for another write that will 1239 // hide the effect of this one. 1240 auto CanSkipAccess = [&](const Access &Acc, bool Exact) { 1241 if (!IsSameThreadAsLoad(Acc)) 1242 return false; 1243 if (!DominatingWrites.count(&Acc)) 1244 return false; 1245 for (const Access *DomAcc : DominatingWrites) { 1246 assert(Acc.getLocalInst()->getFunction() == 1247 DomAcc->getLocalInst()->getFunction() && 1248 "Expected dominating writes to be in the same function!"); 1249 1250 if (DomAcc != &Acc && 1251 DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) { 1252 return true; 1253 } 1254 } 1255 return false; 1256 }; 1257 1258 // Run the user callback on all accesses we cannot skip and return if that 1259 // succeeded for all or not. 1260 unsigned NumInterferingAccesses = InterferingAccesses.size(); 1261 for (auto &It : InterferingAccesses) { 1262 if (!DT || NumInterferingAccesses > MaxInterferingAccesses || 1263 !CanSkipAccess(*It.first, It.second)) { 1264 if (!UserCB(*It.first, It.second)) 1265 return false; 1266 } 1267 } 1268 return true; 1269 } 1270 1271 ChangeStatus translateAndAddCalleeState(Attributor &A, 1272 const AAPointerInfo &CalleeAA, 1273 int64_t CallArgOffset, CallBase &CB) { 1274 using namespace AA::PointerInfo; 1275 if (!CalleeAA.getState().isValidState() || !isValidState()) 1276 return indicatePessimisticFixpoint(); 1277 1278 const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA); 1279 bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr(); 1280 1281 // Combine the accesses bin by bin. 1282 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1283 for (auto &It : CalleeImplAA.getState()) { 1284 OffsetAndSize OAS = OffsetAndSize::getUnknown(); 1285 if (CallArgOffset != OffsetAndSize::Unknown) 1286 OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset, 1287 It.first.getSize()); 1288 Accesses *Bin = AccessBins[OAS]; 1289 for (const AAPointerInfo::Access &RAcc : *It.second) { 1290 if (IsByval && !RAcc.isRead()) 1291 continue; 1292 bool UsedAssumedInformation = false; 1293 Optional<Value *> Content = A.translateArgumentToCallSiteContent( 1294 RAcc.getContent(), CB, *this, UsedAssumedInformation); 1295 AccessKind AK = 1296 AccessKind(RAcc.getKind() & (IsByval ? 
AccessKind::AK_READ 1297 : AccessKind::AK_READ_WRITE)); 1298 Changed = 1299 Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content, 1300 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin); 1301 } 1302 } 1303 return Changed; 1304 } 1305 1306 /// Statistic tracking for all AAPointerInfo implementations. 1307 /// See AbstractAttribute::trackStatistics(). 1308 void trackPointerInfoStatistics(const IRPosition &IRP) const {} 1309 }; 1310 1311 struct AAPointerInfoFloating : public AAPointerInfoImpl { 1312 using AccessKind = AAPointerInfo::AccessKind; 1313 AAPointerInfoFloating(const IRPosition &IRP, Attributor &A) 1314 : AAPointerInfoImpl(IRP, A) {} 1315 1316 /// See AbstractAttribute::initialize(...). 1317 void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); } 1318 1319 /// Deal with an access and signal if it was handled successfully. 1320 bool handleAccess(Attributor &A, Instruction &I, Value &Ptr, 1321 Optional<Value *> Content, AccessKind Kind, int64_t Offset, 1322 ChangeStatus &Changed, Type *Ty, 1323 int64_t Size = OffsetAndSize::Unknown) { 1324 using namespace AA::PointerInfo; 1325 // No need to find a size if one is given or the offset is unknown. 1326 if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown && 1327 Ty) { 1328 const DataLayout &DL = A.getDataLayout(); 1329 TypeSize AccessSize = DL.getTypeStoreSize(Ty); 1330 if (!AccessSize.isScalable()) 1331 Size = AccessSize.getFixedSize(); 1332 } 1333 Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty); 1334 return true; 1335 }; 1336 1337 /// Helper struct, will support ranges eventually. 1338 struct OffsetInfo { 1339 int64_t Offset = OffsetAndSize::Unknown; 1340 1341 bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; } 1342 }; 1343 1344 /// See AbstractAttribute::updateImpl(...). 1345 ChangeStatus updateImpl(Attributor &A) override { 1346 using namespace AA::PointerInfo; 1347 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1348 Value &AssociatedValue = getAssociatedValue(); 1349 1350 const DataLayout &DL = A.getDataLayout(); 1351 DenseMap<Value *, OffsetInfo> OffsetInfoMap; 1352 OffsetInfoMap[&AssociatedValue] = OffsetInfo{0}; 1353 1354 auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI, 1355 bool &Follow) { 1356 OffsetInfo &UsrOI = OffsetInfoMap[Usr]; 1357 UsrOI = PtrOI; 1358 Follow = true; 1359 return true; 1360 }; 1361 1362 const auto *TLI = getAnchorScope() 1363 ? A.getInfoCache().getTargetLibraryInfoForFunction( 1364 *getAnchorScope()) 1365 : nullptr; 1366 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 1367 Value *CurPtr = U.get(); 1368 User *Usr = U.getUser(); 1369 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " 1370 << *Usr << "\n"); 1371 assert(OffsetInfoMap.count(CurPtr) && 1372 "The current pointer offset should have been seeded!"); 1373 1374 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) { 1375 if (CE->isCast()) 1376 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow); 1377 if (CE->isCompare()) 1378 return true; 1379 if (!isa<GEPOperator>(CE)) { 1380 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE 1381 << "\n"); 1382 return false; 1383 } 1384 } 1385 if (auto *GEP = dyn_cast<GEPOperator>(Usr)) { 1386 // Note the order here, the Usr access might change the map, CurPtr is 1387 // already in it though. 1388 OffsetInfo &UsrOI = OffsetInfoMap[Usr]; 1389 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr]; 1390 UsrOI = PtrOI; 1391 1392 // TODO: Use range information. 
1393 if (PtrOI.Offset == OffsetAndSize::Unknown || 1394 !GEP->hasAllConstantIndices()) { 1395 UsrOI.Offset = OffsetAndSize::Unknown; 1396 Follow = true; 1397 return true; 1398 } 1399 1400 SmallVector<Value *, 8> Indices; 1401 for (Use &Idx : GEP->indices()) { 1402 if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) { 1403 Indices.push_back(CIdx); 1404 continue; 1405 } 1406 1407 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP 1408 << " : " << *Idx << "\n"); 1409 return false; 1410 } 1411 UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType( 1412 GEP->getSourceElementType(), Indices); 1413 Follow = true; 1414 return true; 1415 } 1416 if (isa<CastInst>(Usr) || isa<SelectInst>(Usr)) 1417 return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow); 1418 1419 // For PHIs we need to take care of the recurrence explicitly as the value 1420 // might change while we iterate through a loop. For now, we give up if 1421 // the PHI is not invariant. 1422 if (isa<PHINode>(Usr)) { 1423 // Note the order here, the Usr access might change the map, CurPtr is 1424 // already in it though. 1425 OffsetInfo &UsrOI = OffsetInfoMap[Usr]; 1426 OffsetInfo &PtrOI = OffsetInfoMap[CurPtr]; 1427 // Check if the PHI is invariant (so far). 1428 if (UsrOI == PtrOI) 1429 return true; 1430 1431 // Check if the PHI operand has already an unknown offset as we can't 1432 // improve on that anymore. 1433 if (PtrOI.Offset == OffsetAndSize::Unknown) { 1434 UsrOI = PtrOI; 1435 Follow = true; 1436 return true; 1437 } 1438 1439 // Check if the PHI operand is not dependent on the PHI itself. 1440 // TODO: This is not great as we look at the pointer type. However, it 1441 // is unclear where the Offset size comes from with typeless pointers. 1442 APInt Offset( 1443 DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()), 1444 0); 1445 if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets( 1446 DL, Offset, /* AllowNonInbounds */ true)) { 1447 if (Offset != PtrOI.Offset) { 1448 LLVM_DEBUG(dbgs() 1449 << "[AAPointerInfo] PHI operand pointer offset mismatch " 1450 << *CurPtr << " in " << *Usr << "\n"); 1451 return false; 1452 } 1453 return HandlePassthroughUser(Usr, PtrOI, Follow); 1454 } 1455 1456 // TODO: Approximate in case we know the direction of the recurrence. 
1457 LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex " 1458 << *CurPtr << " in " << *Usr << "\n"); 1459 UsrOI = PtrOI; 1460 UsrOI.Offset = OffsetAndSize::Unknown; 1461 Follow = true; 1462 return true; 1463 } 1464 1465 if (auto *LoadI = dyn_cast<LoadInst>(Usr)) 1466 return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr, 1467 AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset, 1468 Changed, LoadI->getType()); 1469 if (auto *StoreI = dyn_cast<StoreInst>(Usr)) { 1470 if (StoreI->getValueOperand() == CurPtr) { 1471 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store " 1472 << *StoreI << "\n"); 1473 return false; 1474 } 1475 bool UsedAssumedInformation = false; 1476 Optional<Value *> Content = A.getAssumedSimplified( 1477 *StoreI->getValueOperand(), *this, UsedAssumedInformation); 1478 return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE, 1479 OffsetInfoMap[CurPtr].Offset, Changed, 1480 StoreI->getValueOperand()->getType()); 1481 } 1482 if (auto *CB = dyn_cast<CallBase>(Usr)) { 1483 if (CB->isLifetimeStartOrEnd()) 1484 return true; 1485 if (TLI && isFreeCall(CB, TLI)) 1486 return true; 1487 if (CB->isArgOperand(&U)) { 1488 unsigned ArgNo = CB->getArgOperandNo(&U); 1489 const auto &CSArgPI = A.getAAFor<AAPointerInfo>( 1490 *this, IRPosition::callsite_argument(*CB, ArgNo), 1491 DepClassTy::REQUIRED); 1492 Changed = translateAndAddCalleeState( 1493 A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) | 1494 Changed; 1495 return true; 1496 } 1497 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB 1498 << "\n"); 1499 // TODO: Allow some call uses 1500 return false; 1501 } 1502 1503 LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n"); 1504 return false; 1505 }; 1506 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) { 1507 if (OffsetInfoMap.count(NewU)) 1508 return OffsetInfoMap[NewU] == OffsetInfoMap[OldU]; 1509 OffsetInfoMap[NewU] = OffsetInfoMap[OldU]; 1510 return true; 1511 }; 1512 if (!A.checkForAllUses(UsePred, *this, AssociatedValue, 1513 /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL, 1514 EquivalentUseCB)) 1515 return indicatePessimisticFixpoint(); 1516 1517 LLVM_DEBUG({ 1518 dbgs() << "Accesses by bin after update:\n"; 1519 for (auto &It : AccessBins) { 1520 dbgs() << "[" << It.first.getOffset() << "-" 1521 << It.first.getOffset() + It.first.getSize() 1522 << "] : " << It.getSecond()->size() << "\n"; 1523 for (auto &Acc : *It.getSecond()) { 1524 dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst() 1525 << "\n"; 1526 if (Acc.getLocalInst() != Acc.getRemoteInst()) 1527 dbgs() << " --> " 1528 << *Acc.getRemoteInst() << "\n"; 1529 if (!Acc.isWrittenValueYetUndetermined()) { 1530 if (Acc.getWrittenValue()) 1531 dbgs() << " - c: " << *Acc.getWrittenValue() << "\n"; 1532 else 1533 dbgs() << " - c: <unknown>\n"; 1534 } 1535 } 1536 } 1537 }); 1538 1539 return Changed; 1540 } 1541 1542 /// See AbstractAttribute::trackStatistics() 1543 void trackStatistics() const override { 1544 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1545 } 1546 }; 1547 1548 struct AAPointerInfoReturned final : AAPointerInfoImpl { 1549 AAPointerInfoReturned(const IRPosition &IRP, Attributor &A) 1550 : AAPointerInfoImpl(IRP, A) {} 1551 1552 /// See AbstractAttribute::updateImpl(...). 
1553 ChangeStatus updateImpl(Attributor &A) override { 1554 return indicatePessimisticFixpoint(); 1555 } 1556 1557 /// See AbstractAttribute::trackStatistics() 1558 void trackStatistics() const override { 1559 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1560 } 1561 }; 1562 1563 struct AAPointerInfoArgument final : AAPointerInfoFloating { 1564 AAPointerInfoArgument(const IRPosition &IRP, Attributor &A) 1565 : AAPointerInfoFloating(IRP, A) {} 1566 1567 /// See AbstractAttribute::initialize(...). 1568 void initialize(Attributor &A) override { 1569 AAPointerInfoFloating::initialize(A); 1570 if (getAnchorScope()->isDeclaration()) 1571 indicatePessimisticFixpoint(); 1572 } 1573 1574 /// See AbstractAttribute::trackStatistics() 1575 void trackStatistics() const override { 1576 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1577 } 1578 }; 1579 1580 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating { 1581 AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) 1582 : AAPointerInfoFloating(IRP, A) {} 1583 1584 /// See AbstractAttribute::updateImpl(...). 1585 ChangeStatus updateImpl(Attributor &A) override { 1586 using namespace AA::PointerInfo; 1587 // We handle memory intrinsics explicitly, at least the first (= 1588 // destination) and second (=source) arguments as we know how they are 1589 // accessed. 1590 if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) { 1591 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 1592 int64_t LengthVal = OffsetAndSize::Unknown; 1593 if (Length) 1594 LengthVal = Length->getSExtValue(); 1595 Value &Ptr = getAssociatedValue(); 1596 unsigned ArgNo = getIRPosition().getCallSiteArgNo(); 1597 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1598 if (ArgNo == 0) { 1599 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed, 1600 nullptr, LengthVal); 1601 } else if (ArgNo == 1) { 1602 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed, 1603 nullptr, LengthVal); 1604 } else { 1605 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic " 1606 << *MI << "\n"); 1607 return indicatePessimisticFixpoint(); 1608 } 1609 return Changed; 1610 } 1611 1612 // TODO: Once we have call site specific value information we can provide 1613 // call site specific liveness information and then it makes 1614 // sense to specialize attributes for call sites arguments instead of 1615 // redirecting requests to the callee argument. 
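    // Until then, this position simply mirrors the callee argument: the code
    // below pulls in the callee argument's AAPointerInfo state and translates
    // its recorded accesses to this call site (with no extra offset).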
1616 Argument *Arg = getAssociatedArgument(); 1617 if (!Arg) 1618 return indicatePessimisticFixpoint(); 1619 const IRPosition &ArgPos = IRPosition::argument(*Arg); 1620 auto &ArgAA = 1621 A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED); 1622 return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI())); 1623 } 1624 1625 /// See AbstractAttribute::trackStatistics() 1626 void trackStatistics() const override { 1627 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1628 } 1629 }; 1630 1631 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating { 1632 AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) 1633 : AAPointerInfoFloating(IRP, A) {} 1634 1635 /// See AbstractAttribute::trackStatistics() 1636 void trackStatistics() const override { 1637 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1638 } 1639 }; 1640 } // namespace 1641 1642 /// -----------------------NoUnwind Function Attribute-------------------------- 1643 1644 namespace { 1645 struct AANoUnwindImpl : AANoUnwind { 1646 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} 1647 1648 const std::string getAsStr() const override { 1649 return getAssumed() ? "nounwind" : "may-unwind"; 1650 } 1651 1652 /// See AbstractAttribute::updateImpl(...). 1653 ChangeStatus updateImpl(Attributor &A) override { 1654 auto Opcodes = { 1655 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 1656 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, 1657 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; 1658 1659 auto CheckForNoUnwind = [&](Instruction &I) { 1660 if (!I.mayThrow()) 1661 return true; 1662 1663 if (const auto *CB = dyn_cast<CallBase>(&I)) { 1664 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 1665 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); 1666 return NoUnwindAA.isAssumedNoUnwind(); 1667 } 1668 return false; 1669 }; 1670 1671 bool UsedAssumedInformation = false; 1672 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes, 1673 UsedAssumedInformation)) 1674 return indicatePessimisticFixpoint(); 1675 1676 return ChangeStatus::UNCHANGED; 1677 } 1678 }; 1679 1680 struct AANoUnwindFunction final : public AANoUnwindImpl { 1681 AANoUnwindFunction(const IRPosition &IRP, Attributor &A) 1682 : AANoUnwindImpl(IRP, A) {} 1683 1684 /// See AbstractAttribute::trackStatistics() 1685 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) } 1686 }; 1687 1688 /// NoUnwind attribute deduction for a call sites. 1689 struct AANoUnwindCallSite final : AANoUnwindImpl { 1690 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) 1691 : AANoUnwindImpl(IRP, A) {} 1692 1693 /// See AbstractAttribute::initialize(...). 1694 void initialize(Attributor &A) override { 1695 AANoUnwindImpl::initialize(A); 1696 Function *F = getAssociatedFunction(); 1697 if (!F || F->isDeclaration()) 1698 indicatePessimisticFixpoint(); 1699 } 1700 1701 /// See AbstractAttribute::updateImpl(...). 1702 ChangeStatus updateImpl(Attributor &A) override { 1703 // TODO: Once we have call site specific value information we can provide 1704 // call site specific liveness information and then it makes 1705 // sense to specialize attributes for call sites arguments instead of 1706 // redirecting requests to the callee argument. 
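    // Until then, a call site inherits the callee's nounwind state; the clamp
    // below intersects our assumed information with the callee's.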
1707 Function *F = getAssociatedFunction(); 1708 const IRPosition &FnPos = IRPosition::function(*F); 1709 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED); 1710 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1711 } 1712 1713 /// See AbstractAttribute::trackStatistics() 1714 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } 1715 }; 1716 } // namespace 1717 1718 /// --------------------- Function Return Values ------------------------------- 1719 1720 namespace { 1721 /// "Attribute" that collects all potential returned values and the return 1722 /// instructions that they arise from. 1723 /// 1724 /// If there is a unique returned value R, the manifest method will: 1725 /// - mark R with the "returned" attribute, if R is an argument. 1726 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { 1727 1728 /// Mapping of values potentially returned by the associated function to the 1729 /// return instructions that might return them. 1730 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; 1731 1732 /// State flags 1733 /// 1734 ///{ 1735 bool IsFixed = false; 1736 bool IsValidState = true; 1737 ///} 1738 1739 public: 1740 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) 1741 : AAReturnedValues(IRP, A) {} 1742 1743 /// See AbstractAttribute::initialize(...). 1744 void initialize(Attributor &A) override { 1745 // Reset the state. 1746 IsFixed = false; 1747 IsValidState = true; 1748 ReturnedValues.clear(); 1749 1750 Function *F = getAssociatedFunction(); 1751 if (!F || F->isDeclaration()) { 1752 indicatePessimisticFixpoint(); 1753 return; 1754 } 1755 assert(!F->getReturnType()->isVoidTy() && 1756 "Did not expect a void return type!"); 1757 1758 // The map from instruction opcodes to those instructions in the function. 1759 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); 1760 1761 // Look through all arguments, if one is marked as returned we are done. 1762 for (Argument &Arg : F->args()) { 1763 if (Arg.hasReturnedAttr()) { 1764 auto &ReturnInstSet = ReturnedValues[&Arg]; 1765 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) 1766 for (Instruction *RI : *Insts) 1767 ReturnInstSet.insert(cast<ReturnInst>(RI)); 1768 1769 indicateOptimisticFixpoint(); 1770 return; 1771 } 1772 } 1773 1774 if (!A.isFunctionIPOAmendable(*F)) 1775 indicatePessimisticFixpoint(); 1776 } 1777 1778 /// See AbstractAttribute::manifest(...). 1779 ChangeStatus manifest(Attributor &A) override; 1780 1781 /// See AbstractAttribute::getState(...). 1782 AbstractState &getState() override { return *this; } 1783 1784 /// See AbstractAttribute::getState(...). 1785 const AbstractState &getState() const override { return *this; } 1786 1787 /// See AbstractAttribute::updateImpl(Attributor &A). 1788 ChangeStatus updateImpl(Attributor &A) override; 1789 1790 llvm::iterator_range<iterator> returned_values() override { 1791 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1792 } 1793 1794 llvm::iterator_range<const_iterator> returned_values() const override { 1795 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1796 } 1797 1798 /// Return the number of potential return values, -1 if unknown. 1799 size_t getNumReturnValues() const override { 1800 return isValidState() ? ReturnedValues.size() : -1; 1801 } 1802 1803 /// Return an assumed unique return value if a single candidate is found. If 1804 /// there cannot be one, return a nullptr. 
If it is not clear yet, return the 1805 /// Optional::NoneType. 1806 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; 1807 1808 /// See AbstractState::checkForAllReturnedValues(...). 1809 bool checkForAllReturnedValuesAndReturnInsts( 1810 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1811 const override; 1812 1813 /// Pretty print the attribute similar to the IR representation. 1814 const std::string getAsStr() const override; 1815 1816 /// See AbstractState::isAtFixpoint(). 1817 bool isAtFixpoint() const override { return IsFixed; } 1818 1819 /// See AbstractState::isValidState(). 1820 bool isValidState() const override { return IsValidState; } 1821 1822 /// See AbstractState::indicateOptimisticFixpoint(...). 1823 ChangeStatus indicateOptimisticFixpoint() override { 1824 IsFixed = true; 1825 return ChangeStatus::UNCHANGED; 1826 } 1827 1828 ChangeStatus indicatePessimisticFixpoint() override { 1829 IsFixed = true; 1830 IsValidState = false; 1831 return ChangeStatus::CHANGED; 1832 } 1833 }; 1834 1835 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { 1836 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1837 1838 // Bookkeeping. 1839 assert(isValidState()); 1840 STATS_DECLTRACK(KnownReturnValues, FunctionReturn, 1841 "Number of function with known return values"); 1842 1843 // Check if we have an assumed unique return value that we could manifest. 1844 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); 1845 1846 if (!UniqueRV.hasValue() || !UniqueRV.getValue()) 1847 return Changed; 1848 1849 // Bookkeeping. 1850 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, 1851 "Number of function with unique return"); 1852 // If the assumed unique return value is an argument, annotate it. 1853 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { 1854 if (UniqueRVArg->getType()->canLosslesslyBitCastTo( 1855 getAssociatedFunction()->getReturnType())) { 1856 getIRPosition() = IRPosition::argument(*UniqueRVArg); 1857 Changed = IRAttribute::manifest(A); 1858 } 1859 } 1860 return Changed; 1861 } 1862 1863 const std::string AAReturnedValuesImpl::getAsStr() const { 1864 return (isAtFixpoint() ? "returns(#" : "may-return(#") + 1865 (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")"; 1866 } 1867 1868 Optional<Value *> 1869 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { 1870 // If checkForAllReturnedValues provides a unique value, ignoring potential 1871 // undef values that can also be present, it is assumed to be the actual 1872 // return value and forwarded to the caller of this method. If there are 1873 // multiple, a nullptr is returned indicating there cannot be a unique 1874 // returned value. 1875 Optional<Value *> UniqueRV; 1876 Type *Ty = getAssociatedFunction()->getReturnType(); 1877 1878 auto Pred = [&](Value &RV) -> bool { 1879 UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty); 1880 return UniqueRV != Optional<Value *>(nullptr); 1881 }; 1882 1883 if (!A.checkForAllReturnedValues(Pred, *this)) 1884 UniqueRV = nullptr; 1885 1886 return UniqueRV; 1887 } 1888 1889 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( 1890 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1891 const { 1892 if (!isValidState()) 1893 return false; 1894 1895 // Check all returned values but ignore call sites as long as we have not 1896 // encountered an overdefined one during an update. 
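  // E.g., a function with `ret i32 %a` in one block and `ret i32 %b` in
  // another yields ReturnedValues = { %a -> {ret1}, %b -> {ret2} } (labels are
  // illustrative); Pred is invoked once per (value, return-instruction set).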
1897 for (auto &It : ReturnedValues) { 1898 Value *RV = It.first; 1899 if (!Pred(*RV, It.second)) 1900 return false; 1901 } 1902 1903 return true; 1904 } 1905 1906 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { 1907 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1908 1909 auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret, 1910 bool) -> bool { 1911 assert(AA::isValidInScope(V, Ret.getFunction()) && 1912 "Assumed returned value should be valid in function scope!"); 1913 if (ReturnedValues[&V].insert(&Ret)) 1914 Changed = ChangeStatus::CHANGED; 1915 return true; 1916 }; 1917 1918 bool UsedAssumedInformation = false; 1919 auto ReturnInstCB = [&](Instruction &I) { 1920 ReturnInst &Ret = cast<ReturnInst>(I); 1921 return genericValueTraversal<ReturnInst>( 1922 A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB, 1923 &I, UsedAssumedInformation, /* UseValueSimplify */ true, 1924 /* MaxValues */ 16, 1925 /* StripCB */ nullptr, /* Intraprocedural */ true); 1926 }; 1927 1928 // Discover returned values from all live returned instructions in the 1929 // associated function. 1930 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret}, 1931 UsedAssumedInformation)) 1932 return indicatePessimisticFixpoint(); 1933 return Changed; 1934 } 1935 1936 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl { 1937 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A) 1938 : AAReturnedValuesImpl(IRP, A) {} 1939 1940 /// See AbstractAttribute::trackStatistics() 1941 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) } 1942 }; 1943 1944 /// Returned values information for a call sites. 1945 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl { 1946 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A) 1947 : AAReturnedValuesImpl(IRP, A) {} 1948 1949 /// See AbstractAttribute::initialize(...). 1950 void initialize(Attributor &A) override { 1951 // TODO: Once we have call site specific value information we can provide 1952 // call site specific liveness information and then it makes 1953 // sense to specialize attributes for call sites instead of 1954 // redirecting requests to the callee. 1955 llvm_unreachable("Abstract attributes for returned values are not " 1956 "supported for call sites yet!"); 1957 } 1958 1959 /// See AbstractAttribute::updateImpl(...). 1960 ChangeStatus updateImpl(Attributor &A) override { 1961 return indicatePessimisticFixpoint(); 1962 } 1963 1964 /// See AbstractAttribute::trackStatistics() 1965 void trackStatistics() const override {} 1966 }; 1967 } // namespace 1968 1969 /// ------------------------ NoSync Function Attribute ------------------------- 1970 1971 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) { 1972 if (!I->isAtomic()) 1973 return false; 1974 1975 if (auto *FI = dyn_cast<FenceInst>(I)) 1976 // All legal orderings for fence are stronger than monotonic. 1977 return FI->getSyncScopeID() != SyncScope::SingleThread; 1978 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) { 1979 // Unordered is not a legal ordering for cmpxchg. 
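    // A cmpxchg carries two orderings (success and failure); it only counts as
    // relaxed if both are monotonic, e.g.
    //   cmpxchg i32* %p, i32 %old, i32 %new monotonic monotonic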
1980 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic || 1981 AI->getFailureOrdering() != AtomicOrdering::Monotonic); 1982 } 1983 1984 AtomicOrdering Ordering; 1985 switch (I->getOpcode()) { 1986 case Instruction::AtomicRMW: 1987 Ordering = cast<AtomicRMWInst>(I)->getOrdering(); 1988 break; 1989 case Instruction::Store: 1990 Ordering = cast<StoreInst>(I)->getOrdering(); 1991 break; 1992 case Instruction::Load: 1993 Ordering = cast<LoadInst>(I)->getOrdering(); 1994 break; 1995 default: 1996 llvm_unreachable( 1997 "New atomic operations need to be known in the attributor."); 1998 } 1999 2000 return (Ordering != AtomicOrdering::Unordered && 2001 Ordering != AtomicOrdering::Monotonic); 2002 } 2003 2004 /// Return true if this intrinsic is nosync. This is only used for intrinsics 2005 /// which would be nosync except that they have a volatile flag. All other 2006 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td. 2007 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) { 2008 if (auto *MI = dyn_cast<MemIntrinsic>(I)) 2009 return !MI->isVolatile(); 2010 return false; 2011 } 2012 2013 namespace { 2014 struct AANoSyncImpl : AANoSync { 2015 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {} 2016 2017 const std::string getAsStr() const override { 2018 return getAssumed() ? "nosync" : "may-sync"; 2019 } 2020 2021 /// See AbstractAttribute::updateImpl(...). 2022 ChangeStatus updateImpl(Attributor &A) override; 2023 }; 2024 2025 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { 2026 2027 auto CheckRWInstForNoSync = [&](Instruction &I) { 2028 return AA::isNoSyncInst(A, I, *this); 2029 }; 2030 2031 auto CheckForNoSync = [&](Instruction &I) { 2032 // At this point we handled all read/write effects and they are all 2033 // nosync, so they can be skipped. 2034 if (I.mayReadOrWriteMemory()) 2035 return true; 2036 2037 // non-convergent and readnone imply nosync. 2038 return !cast<CallBase>(I).isConvergent(); 2039 }; 2040 2041 bool UsedAssumedInformation = false; 2042 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this, 2043 UsedAssumedInformation) || 2044 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this, 2045 UsedAssumedInformation)) 2046 return indicatePessimisticFixpoint(); 2047 2048 return ChangeStatus::UNCHANGED; 2049 } 2050 2051 struct AANoSyncFunction final : public AANoSyncImpl { 2052 AANoSyncFunction(const IRPosition &IRP, Attributor &A) 2053 : AANoSyncImpl(IRP, A) {} 2054 2055 /// See AbstractAttribute::trackStatistics() 2056 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) } 2057 }; 2058 2059 /// NoSync attribute deduction for a call sites. 2060 struct AANoSyncCallSite final : AANoSyncImpl { 2061 AANoSyncCallSite(const IRPosition &IRP, Attributor &A) 2062 : AANoSyncImpl(IRP, A) {} 2063 2064 /// See AbstractAttribute::initialize(...). 2065 void initialize(Attributor &A) override { 2066 AANoSyncImpl::initialize(A); 2067 Function *F = getAssociatedFunction(); 2068 if (!F || F->isDeclaration()) 2069 indicatePessimisticFixpoint(); 2070 } 2071 2072 /// See AbstractAttribute::updateImpl(...). 2073 ChangeStatus updateImpl(Attributor &A) override { 2074 // TODO: Once we have call site specific value information we can provide 2075 // call site specific liveness information and then it makes 2076 // sense to specialize attributes for call sites arguments instead of 2077 // redirecting requests to the callee argument. 
2078 Function *F = getAssociatedFunction(); 2079 const IRPosition &FnPos = IRPosition::function(*F); 2080 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED); 2081 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2082 } 2083 2084 /// See AbstractAttribute::trackStatistics() 2085 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); } 2086 }; 2087 } // namespace 2088 2089 /// ------------------------ No-Free Attributes ---------------------------- 2090 2091 namespace { 2092 struct AANoFreeImpl : public AANoFree { 2093 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} 2094 2095 /// See AbstractAttribute::updateImpl(...). 2096 ChangeStatus updateImpl(Attributor &A) override { 2097 auto CheckForNoFree = [&](Instruction &I) { 2098 const auto &CB = cast<CallBase>(I); 2099 if (CB.hasFnAttr(Attribute::NoFree)) 2100 return true; 2101 2102 const auto &NoFreeAA = A.getAAFor<AANoFree>( 2103 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 2104 return NoFreeAA.isAssumedNoFree(); 2105 }; 2106 2107 bool UsedAssumedInformation = false; 2108 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this, 2109 UsedAssumedInformation)) 2110 return indicatePessimisticFixpoint(); 2111 return ChangeStatus::UNCHANGED; 2112 } 2113 2114 /// See AbstractAttribute::getAsStr(). 2115 const std::string getAsStr() const override { 2116 return getAssumed() ? "nofree" : "may-free"; 2117 } 2118 }; 2119 2120 struct AANoFreeFunction final : public AANoFreeImpl { 2121 AANoFreeFunction(const IRPosition &IRP, Attributor &A) 2122 : AANoFreeImpl(IRP, A) {} 2123 2124 /// See AbstractAttribute::trackStatistics() 2125 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) } 2126 }; 2127 2128 /// NoFree attribute deduction for a call sites. 2129 struct AANoFreeCallSite final : AANoFreeImpl { 2130 AANoFreeCallSite(const IRPosition &IRP, Attributor &A) 2131 : AANoFreeImpl(IRP, A) {} 2132 2133 /// See AbstractAttribute::initialize(...). 2134 void initialize(Attributor &A) override { 2135 AANoFreeImpl::initialize(A); 2136 Function *F = getAssociatedFunction(); 2137 if (!F || F->isDeclaration()) 2138 indicatePessimisticFixpoint(); 2139 } 2140 2141 /// See AbstractAttribute::updateImpl(...). 2142 ChangeStatus updateImpl(Attributor &A) override { 2143 // TODO: Once we have call site specific value information we can provide 2144 // call site specific liveness information and then it makes 2145 // sense to specialize attributes for call sites arguments instead of 2146 // redirecting requests to the callee argument. 2147 Function *F = getAssociatedFunction(); 2148 const IRPosition &FnPos = IRPosition::function(*F); 2149 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED); 2150 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2151 } 2152 2153 /// See AbstractAttribute::trackStatistics() 2154 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); } 2155 }; 2156 2157 /// NoFree attribute for floating values. 2158 struct AANoFreeFloating : AANoFreeImpl { 2159 AANoFreeFloating(const IRPosition &IRP, Attributor &A) 2160 : AANoFreeImpl(IRP, A) {} 2161 2162 /// See AbstractAttribute::trackStatistics() 2163 void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)} 2164 2165 /// See Abstract Attribute::updateImpl(...). 
2166 ChangeStatus updateImpl(Attributor &A) override { 2167 const IRPosition &IRP = getIRPosition(); 2168 2169 const auto &NoFreeAA = A.getAAFor<AANoFree>( 2170 *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL); 2171 if (NoFreeAA.isAssumedNoFree()) 2172 return ChangeStatus::UNCHANGED; 2173 2174 Value &AssociatedValue = getIRPosition().getAssociatedValue(); 2175 auto Pred = [&](const Use &U, bool &Follow) -> bool { 2176 Instruction *UserI = cast<Instruction>(U.getUser()); 2177 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2178 if (CB->isBundleOperand(&U)) 2179 return false; 2180 if (!CB->isArgOperand(&U)) 2181 return true; 2182 unsigned ArgNo = CB->getArgOperandNo(&U); 2183 2184 const auto &NoFreeArg = A.getAAFor<AANoFree>( 2185 *this, IRPosition::callsite_argument(*CB, ArgNo), 2186 DepClassTy::REQUIRED); 2187 return NoFreeArg.isAssumedNoFree(); 2188 } 2189 2190 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 2191 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 2192 Follow = true; 2193 return true; 2194 } 2195 if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) || 2196 isa<ReturnInst>(UserI)) 2197 return true; 2198 2199 // Unknown user. 2200 return false; 2201 }; 2202 if (!A.checkForAllUses(Pred, *this, AssociatedValue)) 2203 return indicatePessimisticFixpoint(); 2204 2205 return ChangeStatus::UNCHANGED; 2206 } 2207 }; 2208 2209 /// NoFree attribute for a call site argument. 2210 struct AANoFreeArgument final : AANoFreeFloating { 2211 AANoFreeArgument(const IRPosition &IRP, Attributor &A) 2212 : AANoFreeFloating(IRP, A) {} 2213 2214 /// See AbstractAttribute::trackStatistics() 2215 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) } 2216 }; 2217 2218 /// NoFree attribute for call site arguments. 2219 struct AANoFreeCallSiteArgument final : AANoFreeFloating { 2220 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) 2221 : AANoFreeFloating(IRP, A) {} 2222 2223 /// See AbstractAttribute::updateImpl(...). 2224 ChangeStatus updateImpl(Attributor &A) override { 2225 // TODO: Once we have call site specific value information we can provide 2226 // call site specific liveness information and then it makes 2227 // sense to specialize attributes for call sites arguments instead of 2228 // redirecting requests to the callee argument. 2229 Argument *Arg = getAssociatedArgument(); 2230 if (!Arg) 2231 return indicatePessimisticFixpoint(); 2232 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2233 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED); 2234 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 2235 } 2236 2237 /// See AbstractAttribute::trackStatistics() 2238 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}; 2239 }; 2240 2241 /// NoFree attribute for function return value. 2242 struct AANoFreeReturned final : AANoFreeFloating { 2243 AANoFreeReturned(const IRPosition &IRP, Attributor &A) 2244 : AANoFreeFloating(IRP, A) { 2245 llvm_unreachable("NoFree is not applicable to function returns!"); 2246 } 2247 2248 /// See AbstractAttribute::initialize(...). 2249 void initialize(Attributor &A) override { 2250 llvm_unreachable("NoFree is not applicable to function returns!"); 2251 } 2252 2253 /// See AbstractAttribute::updateImpl(...). 
2254 ChangeStatus updateImpl(Attributor &A) override { 2255 llvm_unreachable("NoFree is not applicable to function returns!"); 2256 } 2257 2258 /// See AbstractAttribute::trackStatistics() 2259 void trackStatistics() const override {} 2260 }; 2261 2262 /// NoFree attribute deduction for a call site return value. 2263 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 2264 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 2265 : AANoFreeFloating(IRP, A) {} 2266 2267 ChangeStatus manifest(Attributor &A) override { 2268 return ChangeStatus::UNCHANGED; 2269 } 2270 /// See AbstractAttribute::trackStatistics() 2271 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 2272 }; 2273 } // namespace 2274 2275 /// ------------------------ NonNull Argument Attribute ------------------------ 2276 namespace { 2277 static int64_t getKnownNonNullAndDerefBytesForUse( 2278 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 2279 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 2280 TrackUse = false; 2281 2282 const Value *UseV = U->get(); 2283 if (!UseV->getType()->isPointerTy()) 2284 return 0; 2285 2286 // We need to follow common pointer manipulation uses to the accesses they 2287 // feed into. We can try to be smart to avoid looking through things we do not 2288 // like for now, e.g., non-inbounds GEPs. 2289 if (isa<CastInst>(I)) { 2290 TrackUse = true; 2291 return 0; 2292 } 2293 2294 if (isa<GetElementPtrInst>(I)) { 2295 TrackUse = true; 2296 return 0; 2297 } 2298 2299 Type *PtrTy = UseV->getType(); 2300 const Function *F = I->getFunction(); 2301 bool NullPointerIsDefined = 2302 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 2303 const DataLayout &DL = A.getInfoCache().getDL(); 2304 if (const auto *CB = dyn_cast<CallBase>(I)) { 2305 if (CB->isBundleOperand(U)) { 2306 if (RetainedKnowledge RK = getKnowledgeFromUse( 2307 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 2308 IsNonNull |= 2309 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 2310 return RK.ArgValue; 2311 } 2312 return 0; 2313 } 2314 2315 if (CB->isCallee(U)) { 2316 IsNonNull |= !NullPointerIsDefined; 2317 return 0; 2318 } 2319 2320 unsigned ArgNo = CB->getArgOperandNo(U); 2321 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 2322 // As long as we only use known information there is no need to track 2323 // dependences here. 2324 auto &DerefAA = 2325 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE); 2326 IsNonNull |= DerefAA.isKnownNonNull(); 2327 return DerefAA.getKnownDereferenceableBytes(); 2328 } 2329 2330 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I); 2331 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) 2332 return 0; 2333 2334 int64_t Offset; 2335 const Value *Base = 2336 getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL); 2337 if (Base && Base == &AssociatedValue) { 2338 int64_t DerefBytes = Loc->Size.getValue() + Offset; 2339 IsNonNull |= !NullPointerIsDefined; 2340 return std::max(int64_t(0), DerefBytes); 2341 } 2342 2343 /// Corner case when an offset is 0. 
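  // E.g., a (possibly non-inbounds) GEP whose constant indices sum to zero
  // still points at the associated value itself, so the full access size is
  // known to be dereferenceable.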
2344 Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL, 2345 /*AllowNonInbounds*/ true); 2346 if (Base && Base == &AssociatedValue && Offset == 0) { 2347 int64_t DerefBytes = Loc->Size.getValue(); 2348 IsNonNull |= !NullPointerIsDefined; 2349 return std::max(int64_t(0), DerefBytes); 2350 } 2351 2352 return 0; 2353 } 2354 2355 struct AANonNullImpl : AANonNull { 2356 AANonNullImpl(const IRPosition &IRP, Attributor &A) 2357 : AANonNull(IRP, A), 2358 NullIsDefined(NullPointerIsDefined( 2359 getAnchorScope(), 2360 getAssociatedValue().getType()->getPointerAddressSpace())) {} 2361 2362 /// See AbstractAttribute::initialize(...). 2363 void initialize(Attributor &A) override { 2364 Value &V = getAssociatedValue(); 2365 if (!NullIsDefined && 2366 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 2367 /* IgnoreSubsumingPositions */ false, &A)) { 2368 indicateOptimisticFixpoint(); 2369 return; 2370 } 2371 2372 if (isa<ConstantPointerNull>(V)) { 2373 indicatePessimisticFixpoint(); 2374 return; 2375 } 2376 2377 AANonNull::initialize(A); 2378 2379 bool CanBeNull, CanBeFreed; 2380 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull, 2381 CanBeFreed)) { 2382 if (!CanBeNull) { 2383 indicateOptimisticFixpoint(); 2384 return; 2385 } 2386 } 2387 2388 if (isa<GlobalValue>(&getAssociatedValue())) { 2389 indicatePessimisticFixpoint(); 2390 return; 2391 } 2392 2393 if (Instruction *CtxI = getCtxI()) 2394 followUsesInMBEC(*this, A, getState(), *CtxI); 2395 } 2396 2397 /// See followUsesInMBEC 2398 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 2399 AANonNull::StateType &State) { 2400 bool IsNonNull = false; 2401 bool TrackUse = false; 2402 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 2403 IsNonNull, TrackUse); 2404 State.setKnown(IsNonNull); 2405 return TrackUse; 2406 } 2407 2408 /// See AbstractAttribute::getAsStr(). 2409 const std::string getAsStr() const override { 2410 return getAssumed() ? "nonnull" : "may-null"; 2411 } 2412 2413 /// Flag to determine if the underlying value can be null and still allow 2414 /// valid accesses. 2415 const bool NullIsDefined; 2416 }; 2417 2418 /// NonNull attribute for a floating value. 2419 struct AANonNullFloating : public AANonNullImpl { 2420 AANonNullFloating(const IRPosition &IRP, Attributor &A) 2421 : AANonNullImpl(IRP, A) {} 2422 2423 /// See AbstractAttribute::updateImpl(...). 2424 ChangeStatus updateImpl(Attributor &A) override { 2425 const DataLayout &DL = A.getDataLayout(); 2426 2427 DominatorTree *DT = nullptr; 2428 AssumptionCache *AC = nullptr; 2429 InformationCache &InfoCache = A.getInfoCache(); 2430 if (const Function *Fn = getAnchorScope()) { 2431 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 2432 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 2433 } 2434 2435 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 2436 AANonNull::StateType &T, bool Stripped) -> bool { 2437 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V), 2438 DepClassTy::REQUIRED); 2439 if (!Stripped && this == &AA) { 2440 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 2441 T.indicatePessimisticFixpoint(); 2442 } else { 2443 // Use abstract attribute information. 
2444 const AANonNull::StateType &NS = AA.getState(); 2445 T ^= NS; 2446 } 2447 return T.isValidState(); 2448 }; 2449 2450 StateType T; 2451 bool UsedAssumedInformation = false; 2452 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 2453 VisitValueCB, getCtxI(), 2454 UsedAssumedInformation)) 2455 return indicatePessimisticFixpoint(); 2456 2457 return clampStateAndIndicateChange(getState(), T); 2458 } 2459 2460 /// See AbstractAttribute::trackStatistics() 2461 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2462 }; 2463 2464 /// NonNull attribute for function return value. 2465 struct AANonNullReturned final 2466 : AAReturnedFromReturnedValues<AANonNull, AANonNull> { 2467 AANonNullReturned(const IRPosition &IRP, Attributor &A) 2468 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} 2469 2470 /// See AbstractAttribute::getAsStr(). 2471 const std::string getAsStr() const override { 2472 return getAssumed() ? "nonnull" : "may-null"; 2473 } 2474 2475 /// See AbstractAttribute::trackStatistics() 2476 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2477 }; 2478 2479 /// NonNull attribute for function argument. 2480 struct AANonNullArgument final 2481 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 2482 AANonNullArgument(const IRPosition &IRP, Attributor &A) 2483 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 2484 2485 /// See AbstractAttribute::trackStatistics() 2486 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 2487 }; 2488 2489 struct AANonNullCallSiteArgument final : AANonNullFloating { 2490 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 2491 : AANonNullFloating(IRP, A) {} 2492 2493 /// See AbstractAttribute::trackStatistics() 2494 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 2495 }; 2496 2497 /// NonNull attribute for a call site return position. 2498 struct AANonNullCallSiteReturned final 2499 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 2500 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 2501 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 2502 2503 /// See AbstractAttribute::trackStatistics() 2504 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 2505 }; 2506 } // namespace 2507 2508 /// ------------------------ No-Recurse Attributes ---------------------------- 2509 2510 namespace { 2511 struct AANoRecurseImpl : public AANoRecurse { 2512 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 2513 2514 /// See AbstractAttribute::getAsStr() 2515 const std::string getAsStr() const override { 2516 return getAssumed() ? "norecurse" : "may-recurse"; 2517 } 2518 }; 2519 2520 struct AANoRecurseFunction final : AANoRecurseImpl { 2521 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 2522 : AANoRecurseImpl(IRP, A) {} 2523 2524 /// See AbstractAttribute::updateImpl(...). 2525 ChangeStatus updateImpl(Attributor &A) override { 2526 2527 // If all live call sites are known to be no-recurse, we are as well. 
2528 auto CallSitePred = [&](AbstractCallSite ACS) { 2529 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2530 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2531 DepClassTy::NONE); 2532 return NoRecurseAA.isKnownNoRecurse(); 2533 }; 2534 bool UsedAssumedInformation = false; 2535 if (A.checkForAllCallSites(CallSitePred, *this, true, 2536 UsedAssumedInformation)) { 2537 // If we know all call sites and all are known no-recurse, we are done. 2538 // If all known call sites, which might not be all that exist, are known 2539 // to be no-recurse, we are not done but we can continue to assume 2540 // no-recurse. If one of the call sites we have not visited will become 2541 // live, another update is triggered. 2542 if (!UsedAssumedInformation) 2543 indicateOptimisticFixpoint(); 2544 return ChangeStatus::UNCHANGED; 2545 } 2546 2547 const AAFunctionReachability &EdgeReachability = 2548 A.getAAFor<AAFunctionReachability>(*this, getIRPosition(), 2549 DepClassTy::REQUIRED); 2550 if (EdgeReachability.canReach(A, *getAnchorScope())) 2551 return indicatePessimisticFixpoint(); 2552 return ChangeStatus::UNCHANGED; 2553 } 2554 2555 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 2556 }; 2557 2558 /// NoRecurse attribute deduction for a call sites. 2559 struct AANoRecurseCallSite final : AANoRecurseImpl { 2560 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 2561 : AANoRecurseImpl(IRP, A) {} 2562 2563 /// See AbstractAttribute::initialize(...). 2564 void initialize(Attributor &A) override { 2565 AANoRecurseImpl::initialize(A); 2566 Function *F = getAssociatedFunction(); 2567 if (!F || F->isDeclaration()) 2568 indicatePessimisticFixpoint(); 2569 } 2570 2571 /// See AbstractAttribute::updateImpl(...). 2572 ChangeStatus updateImpl(Attributor &A) override { 2573 // TODO: Once we have call site specific value information we can provide 2574 // call site specific liveness information and then it makes 2575 // sense to specialize attributes for call sites arguments instead of 2576 // redirecting requests to the callee argument. 2577 Function *F = getAssociatedFunction(); 2578 const IRPosition &FnPos = IRPosition::function(*F); 2579 auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED); 2580 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2581 } 2582 2583 /// See AbstractAttribute::trackStatistics() 2584 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); } 2585 }; 2586 } // namespace 2587 2588 /// -------------------- Undefined-Behavior Attributes ------------------------ 2589 2590 namespace { 2591 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior { 2592 AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A) 2593 : AAUndefinedBehavior(IRP, A) {} 2594 2595 /// See AbstractAttribute::updateImpl(...). 2596 // through a pointer (i.e. also branches etc.) 2597 ChangeStatus updateImpl(Attributor &A) override { 2598 const size_t UBPrevSize = KnownUBInsts.size(); 2599 const size_t NoUBPrevSize = AssumedNoUBInsts.size(); 2600 2601 auto InspectMemAccessInstForUB = [&](Instruction &I) { 2602 // Lang ref now states volatile store is not UB, let's skip them. 2603 if (I.isVolatile() && I.mayWriteToMemory()) 2604 return true; 2605 2606 // Skip instructions that are already saved. 
2607 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 2608 return true; 2609 2610 // If we reach here, we know we have an instruction 2611 // that accesses memory through a pointer operand, 2612 // for which getPointerOperand() should give it to us. 2613 Value *PtrOp = 2614 const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true)); 2615 assert(PtrOp && 2616 "Expected pointer operand of memory accessing instruction"); 2617 2618 // Either we stopped and the appropriate action was taken, 2619 // or we got back a simplified value to continue. 2620 Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I); 2621 if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue()) 2622 return true; 2623 const Value *PtrOpVal = SimplifiedPtrOp.getValue(); 2624 2625 // A memory access through a pointer is considered UB 2626 // only if the pointer has constant null value. 2627 // TODO: Expand it to not only check constant values. 2628 if (!isa<ConstantPointerNull>(PtrOpVal)) { 2629 AssumedNoUBInsts.insert(&I); 2630 return true; 2631 } 2632 const Type *PtrTy = PtrOpVal->getType(); 2633 2634 // Because we only consider instructions inside functions, 2635 // assume that a parent function exists. 2636 const Function *F = I.getFunction(); 2637 2638 // A memory access using constant null pointer is only considered UB 2639 // if null pointer is _not_ defined for the target platform. 2640 if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace())) 2641 AssumedNoUBInsts.insert(&I); 2642 else 2643 KnownUBInsts.insert(&I); 2644 return true; 2645 }; 2646 2647 auto InspectBrInstForUB = [&](Instruction &I) { 2648 // A conditional branch instruction is considered UB if it has `undef` 2649 // condition. 2650 2651 // Skip instructions that are already saved. 2652 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 2653 return true; 2654 2655 // We know we have a branch instruction. 2656 auto *BrInst = cast<BranchInst>(&I); 2657 2658 // Unconditional branches are never considered UB. 2659 if (BrInst->isUnconditional()) 2660 return true; 2661 2662 // Either we stopped and the appropriate action was taken, 2663 // or we got back a simplified value to continue. 2664 Optional<Value *> SimplifiedCond = 2665 stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst); 2666 if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue()) 2667 return true; 2668 AssumedNoUBInsts.insert(&I); 2669 return true; 2670 }; 2671 2672 auto InspectCallSiteForUB = [&](Instruction &I) { 2673 // Check whether a callsite always cause UB or not 2674 2675 // Skip instructions that are already saved. 2676 if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I)) 2677 return true; 2678 2679 // Check nonnull and noundef argument attribute violation for each 2680 // callsite. 2681 CallBase &CB = cast<CallBase>(I); 2682 Function *Callee = CB.getCalledFunction(); 2683 if (!Callee) 2684 return true; 2685 for (unsigned idx = 0; idx < CB.arg_size(); idx++) { 2686 // If current argument is known to be simplified to null pointer and the 2687 // corresponding argument position is known to have nonnull attribute, 2688 // the argument is poison. Furthermore, if the argument is poison and 2689 // the position is known to have noundef attriubte, this callsite is 2690 // considered UB. 2691 if (idx >= Callee->arg_size()) 2692 break; 2693 Value *ArgVal = CB.getArgOperand(idx); 2694 if (!ArgVal) 2695 continue; 2696 // Here, we handle three cases. 2697 // (1) Not having a value means it is dead. 
(we can replace the value
2698         //     with undef)
2699         // (2) Simplified to undef. The argument violates the noundef attribute.
2700         // (3) Simplified to null pointer where known to be nonnull.
2701         //     The argument is a poison value and violates the noundef attribute.
2702         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2703         auto &NoUndefAA =
2704             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2705         if (!NoUndefAA.isKnownNoUndef())
2706           continue;
2707         bool UsedAssumedInformation = false;
2708         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2709             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2710         if (UsedAssumedInformation)
2711           continue;
2712         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2713           return true;
2714         if (!SimplifiedVal.hasValue() ||
2715             isa<UndefValue>(*SimplifiedVal.getValue())) {
2716           KnownUBInsts.insert(&I);
2717           continue;
2718         }
2719         if (!ArgVal->getType()->isPointerTy() ||
2720             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2721           continue;
2722         auto &NonNullAA =
2723             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2724         if (NonNullAA.isKnownNonNull())
2725           KnownUBInsts.insert(&I);
2726       }
2727       return true;
2728     };
2729 
2730     auto InspectReturnInstForUB = [&](Instruction &I) {
2731       auto &RI = cast<ReturnInst>(I);
2732       // Either we stopped and the appropriate action was taken,
2733       // or we got back a simplified return value to continue.
2734       Optional<Value *> SimplifiedRetValue =
2735           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2736       if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
2737         return true;
2738 
2739       // Check if a return instruction always causes UB or not.
2740       // Note: It is guaranteed that the returned position of the anchor
2741       // scope has the noundef attribute when this is called.
2742       // We also ensure the return position is not "assumed dead"
2743       // because the returned value was then potentially simplified to
2744       // `undef` in AAReturnedValues without removing the `noundef`
2745       // attribute yet.
2746 
2747       // When the returned position has the noundef attribute, UB occurs in the
2748       // following cases.
2749       // (1) Returned value is known to be undef.
2750       // (2) The value is known to be a null pointer and the returned
2751       //     position has the nonnull attribute (because the returned value is
2752       //     poison).
2753       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2754         auto &NonNullAA = A.getAAFor<AANonNull>(
2755             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2756         if (NonNullAA.isKnownNonNull())
2757           KnownUBInsts.insert(&I);
2758       }
2759 
2760       return true;
2761     };
2762 
2763     bool UsedAssumedInformation = false;
2764     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2765                               {Instruction::Load, Instruction::Store,
2766                                Instruction::AtomicCmpXchg,
2767                                Instruction::AtomicRMW},
2768                               UsedAssumedInformation,
2769                               /* CheckBBLivenessOnly */ true);
2770     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2771                               UsedAssumedInformation,
2772                               /* CheckBBLivenessOnly */ true);
2773     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2774                                       UsedAssumedInformation);
2775 
2776     // If the returned position of the anchor scope has the noundef attribute,
2777     // check all returned instructions.
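    // Illustrative example (hypothetical function): for
    //   define noundef nonnull i8* @f() { ... ret i8* null ... }
    // the returned null is poison due to `nonnull` and therefore UB under
    // `noundef`; InspectReturnInstForUB records such returns as known UB.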
2778     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2779       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2780       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2781         auto &RetPosNoUndefAA =
2782             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2783         if (RetPosNoUndefAA.isKnownNoUndef())
2784           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2785                                     {Instruction::Ret}, UsedAssumedInformation,
2786                                     /* CheckBBLivenessOnly */ true);
2787       }
2788     }
2789 
2790     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2791         UBPrevSize != KnownUBInsts.size())
2792       return ChangeStatus::CHANGED;
2793     return ChangeStatus::UNCHANGED;
2794   }
2795 
2796   bool isKnownToCauseUB(Instruction *I) const override {
2797     return KnownUBInsts.count(I);
2798   }
2799 
2800   bool isAssumedToCauseUB(Instruction *I) const override {
2801     // In simple words, if an instruction is not in the set assumed to _not_
2802     // cause UB, then it is assumed to cause UB (that includes those
2803     // in the KnownUBInsts set). The rest is boilerplate
2804     // to ensure that it is one of the instructions we test
2805     // for UB.
2806 
2807     switch (I->getOpcode()) {
2808     case Instruction::Load:
2809     case Instruction::Store:
2810     case Instruction::AtomicCmpXchg:
2811     case Instruction::AtomicRMW:
2812       return !AssumedNoUBInsts.count(I);
2813     case Instruction::Br: {
2814       auto *BrInst = cast<BranchInst>(I);
2815       if (BrInst->isUnconditional())
2816         return false;
2817       return !AssumedNoUBInsts.count(I);
2818     } break;
2819     default:
2820       return false;
2821     }
2822     return false;
2823   }
2824 
2825   ChangeStatus manifest(Attributor &A) override {
2826     if (KnownUBInsts.empty())
2827       return ChangeStatus::UNCHANGED;
2828     for (Instruction *I : KnownUBInsts)
2829       A.changeToUnreachableAfterManifest(I);
2830     return ChangeStatus::CHANGED;
2831   }
2832 
2833   /// See AbstractAttribute::getAsStr()
2834   const std::string getAsStr() const override {
2835     return getAssumed() ? "undefined-behavior" : "no-ub";
2836   }
2837 
2838   /// Note: The correctness of this analysis depends on the fact that the
2839   /// following 2 sets will stop changing after some point.
2840   /// "Change" here means that their size changes.
2841   /// The size of each set is monotonically increasing
2842   /// (we only add items to them) and it is upper bounded by the number of
2843   /// instructions in the processed function (we can never save more
2844   /// elements in either set than this number). Hence, at some point,
2845   /// they will stop increasing.
2846   /// Consequently, at some point, both sets will have stopped
2847   /// changing, effectively making the analysis reach a fixpoint.
2848 
2849   /// Note: These 2 sets are disjoint and an instruction can be considered
2850   /// one of 3 things:
2851   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2852   ///    the KnownUBInsts set.
2853   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2854   ///    has a reason to assume it).
2855   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2856   ///    could not find a reason to assume or prove that it can cause UB,
2857   ///    hence it assumes it doesn't. We have a set for these instructions
2858   ///    so that we don't reprocess them in every update.
2859   /// Note however that instructions in this set may cause UB.
2860 
2861 protected:
2862   /// A set of all live instructions _known_ to cause UB.
2863 SmallPtrSet<Instruction *, 8> KnownUBInsts; 2864 2865 private: 2866 /// A set of all the (live) instructions that are assumed to _not_ cause UB. 2867 SmallPtrSet<Instruction *, 8> AssumedNoUBInsts; 2868 2869 // Should be called on updates in which if we're processing an instruction 2870 // \p I that depends on a value \p V, one of the following has to happen: 2871 // - If the value is assumed, then stop. 2872 // - If the value is known but undef, then consider it UB. 2873 // - Otherwise, do specific processing with the simplified value. 2874 // We return None in the first 2 cases to signify that an appropriate 2875 // action was taken and the caller should stop. 2876 // Otherwise, we return the simplified value that the caller should 2877 // use for specific processing. 2878 Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V, 2879 Instruction *I) { 2880 bool UsedAssumedInformation = false; 2881 Optional<Value *> SimplifiedV = A.getAssumedSimplified( 2882 IRPosition::value(*V), *this, UsedAssumedInformation); 2883 if (!UsedAssumedInformation) { 2884 // Don't depend on assumed values. 2885 if (!SimplifiedV.hasValue()) { 2886 // If it is known (which we tested above) but it doesn't have a value, 2887 // then we can assume `undef` and hence the instruction is UB. 2888 KnownUBInsts.insert(I); 2889 return llvm::None; 2890 } 2891 if (!SimplifiedV.getValue()) 2892 return nullptr; 2893 V = *SimplifiedV; 2894 } 2895 if (isa<UndefValue>(V)) { 2896 KnownUBInsts.insert(I); 2897 return llvm::None; 2898 } 2899 return V; 2900 } 2901 }; 2902 2903 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl { 2904 AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A) 2905 : AAUndefinedBehaviorImpl(IRP, A) {} 2906 2907 /// See AbstractAttribute::trackStatistics() 2908 void trackStatistics() const override { 2909 STATS_DECL(UndefinedBehaviorInstruction, Instruction, 2910 "Number of instructions known to have UB"); 2911 BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) += 2912 KnownUBInsts.size(); 2913 } 2914 }; 2915 } // namespace 2916 2917 /// ------------------------ Will-Return Attributes ---------------------------- 2918 2919 namespace { 2920 // Helper function that checks whether a function has any cycle which we don't 2921 // know if it is bounded or not. 2922 // Loops with maximum trip count are considered bounded, any other cycle not. 2923 static bool mayContainUnboundedCycle(Function &F, Attributor &A) { 2924 ScalarEvolution *SE = 2925 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F); 2926 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F); 2927 // If either SCEV or LoopInfo is not available for the function then we assume 2928 // any cycle to be unbounded cycle. 2929 // We use scc_iterator which uses Tarjan algorithm to find all the maximal 2930 // SCCs.To detect if there's a cycle, we only need to find the maximal ones. 2931 if (!SE || !LI) { 2932 for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI) 2933 if (SCCI.hasCycle()) 2934 return true; 2935 return false; 2936 } 2937 2938 // If there's irreducible control, the function may contain non-loop cycles. 2939 if (mayContainIrreducibleControl(F, LI)) 2940 return true; 2941 2942 // Any loop that does not have a max trip count is considered unbounded cycle. 
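  // E.g., for a search loop like `while (*p) ++p;` SCEV cannot bound the trip
  // count, getSmallConstantMaxTripCount returns 0, and we conservatively treat
  // the loop as a potentially unbounded cycle.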
2943 for (auto *L : LI->getLoopsInPreorder()) { 2944 if (!SE->getSmallConstantMaxTripCount(L)) 2945 return true; 2946 } 2947 return false; 2948 } 2949 2950 struct AAWillReturnImpl : public AAWillReturn { 2951 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2952 : AAWillReturn(IRP, A) {} 2953 2954 /// See AbstractAttribute::initialize(...). 2955 void initialize(Attributor &A) override { 2956 AAWillReturn::initialize(A); 2957 2958 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) { 2959 indicateOptimisticFixpoint(); 2960 return; 2961 } 2962 } 2963 2964 /// Check for `mustprogress` and `readonly` as they imply `willreturn`. 2965 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) { 2966 // Check for `mustprogress` in the scope and the associated function which 2967 // might be different if this is a call site. 2968 if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) && 2969 (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress())) 2970 return false; 2971 2972 bool IsKnown; 2973 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 2974 return IsKnown || !KnownOnly; 2975 return false; 2976 } 2977 2978 /// See AbstractAttribute::updateImpl(...). 2979 ChangeStatus updateImpl(Attributor &A) override { 2980 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2981 return ChangeStatus::UNCHANGED; 2982 2983 auto CheckForWillReturn = [&](Instruction &I) { 2984 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2985 const auto &WillReturnAA = 2986 A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED); 2987 if (WillReturnAA.isKnownWillReturn()) 2988 return true; 2989 if (!WillReturnAA.isAssumedWillReturn()) 2990 return false; 2991 const auto &NoRecurseAA = 2992 A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED); 2993 return NoRecurseAA.isAssumedNoRecurse(); 2994 }; 2995 2996 bool UsedAssumedInformation = false; 2997 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this, 2998 UsedAssumedInformation)) 2999 return indicatePessimisticFixpoint(); 3000 3001 return ChangeStatus::UNCHANGED; 3002 } 3003 3004 /// See AbstractAttribute::getAsStr() 3005 const std::string getAsStr() const override { 3006 return getAssumed() ? "willreturn" : "may-noreturn"; 3007 } 3008 }; 3009 3010 struct AAWillReturnFunction final : AAWillReturnImpl { 3011 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 3012 : AAWillReturnImpl(IRP, A) {} 3013 3014 /// See AbstractAttribute::initialize(...). 3015 void initialize(Attributor &A) override { 3016 AAWillReturnImpl::initialize(A); 3017 3018 Function *F = getAnchorScope(); 3019 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 3020 indicatePessimisticFixpoint(); 3021 } 3022 3023 /// See AbstractAttribute::trackStatistics() 3024 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 3025 }; 3026 3027 /// WillReturn attribute deduction for a call sites. 3028 struct AAWillReturnCallSite final : AAWillReturnImpl { 3029 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 3030 : AAWillReturnImpl(IRP, A) {} 3031 3032 /// See AbstractAttribute::initialize(...). 3033 void initialize(Attributor &A) override { 3034 AAWillReturnImpl::initialize(A); 3035 Function *F = getAssociatedFunction(); 3036 if (!F || !A.isFunctionIPOAmendable(*F)) 3037 indicatePessimisticFixpoint(); 3038 } 3039 3040 /// See AbstractAttribute::updateImpl(...). 
3041 ChangeStatus updateImpl(Attributor &A) override { 3042 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 3043 return ChangeStatus::UNCHANGED; 3044 3045 // TODO: Once we have call site specific value information we can provide 3046 // call site specific liveness information and then it makes 3047 // sense to specialize attributes for call sites arguments instead of 3048 // redirecting requests to the callee argument. 3049 Function *F = getAssociatedFunction(); 3050 const IRPosition &FnPos = IRPosition::function(*F); 3051 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 3052 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3053 } 3054 3055 /// See AbstractAttribute::trackStatistics() 3056 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 3057 }; 3058 } // namespace 3059 3060 /// -------------------AAReachability Attribute-------------------------- 3061 3062 namespace { 3063 struct AAReachabilityImpl : AAReachability { 3064 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 3065 : AAReachability(IRP, A) {} 3066 3067 const std::string getAsStr() const override { 3068 // TODO: Return the number of reachable queries. 3069 return "reachable"; 3070 } 3071 3072 /// See AbstractAttribute::updateImpl(...). 3073 ChangeStatus updateImpl(Attributor &A) override { 3074 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 3075 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 3076 if (!NoRecurseAA.isAssumedNoRecurse()) 3077 return indicatePessimisticFixpoint(); 3078 return ChangeStatus::UNCHANGED; 3079 } 3080 }; 3081 3082 struct AAReachabilityFunction final : public AAReachabilityImpl { 3083 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 3084 : AAReachabilityImpl(IRP, A) {} 3085 3086 /// See AbstractAttribute::trackStatistics() 3087 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 3088 }; 3089 } // namespace 3090 3091 /// ------------------------ NoAlias Argument Attribute ------------------------ 3092 3093 namespace { 3094 struct AANoAliasImpl : AANoAlias { 3095 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 3096 assert(getAssociatedType()->isPointerTy() && 3097 "Noalias is a pointer attribute"); 3098 } 3099 3100 const std::string getAsStr() const override { 3101 return getAssumed() ? "noalias" : "may-alias"; 3102 } 3103 }; 3104 3105 /// NoAlias attribute for a floating value. 3106 struct AANoAliasFloating final : AANoAliasImpl { 3107 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 3108 : AANoAliasImpl(IRP, A) {} 3109 3110 /// See AbstractAttribute::initialize(...). 
3111 void initialize(Attributor &A) override { 3112 AANoAliasImpl::initialize(A); 3113 Value *Val = &getAssociatedValue(); 3114 do { 3115 CastInst *CI = dyn_cast<CastInst>(Val); 3116 if (!CI) 3117 break; 3118 Value *Base = CI->getOperand(0); 3119 if (!Base->hasOneUse()) 3120 break; 3121 Val = Base; 3122 } while (true); 3123 3124 if (!Val->getType()->isPointerTy()) { 3125 indicatePessimisticFixpoint(); 3126 return; 3127 } 3128 3129 if (isa<AllocaInst>(Val)) 3130 indicateOptimisticFixpoint(); 3131 else if (isa<ConstantPointerNull>(Val) && 3132 !NullPointerIsDefined(getAnchorScope(), 3133 Val->getType()->getPointerAddressSpace())) 3134 indicateOptimisticFixpoint(); 3135 else if (Val != &getAssociatedValue()) { 3136 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>( 3137 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL); 3138 if (ValNoAliasAA.isKnownNoAlias()) 3139 indicateOptimisticFixpoint(); 3140 } 3141 } 3142 3143 /// See AbstractAttribute::updateImpl(...). 3144 ChangeStatus updateImpl(Attributor &A) override { 3145 // TODO: Implement this. 3146 return indicatePessimisticFixpoint(); 3147 } 3148 3149 /// See AbstractAttribute::trackStatistics() 3150 void trackStatistics() const override { 3151 STATS_DECLTRACK_FLOATING_ATTR(noalias) 3152 } 3153 }; 3154 3155 /// NoAlias attribute for an argument. 3156 struct AANoAliasArgument final 3157 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 3158 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 3159 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3160 3161 /// See AbstractAttribute::initialize(...). 3162 void initialize(Attributor &A) override { 3163 Base::initialize(A); 3164 // See callsite argument attribute and callee argument attribute. 3165 if (hasAttr({Attribute::ByVal})) 3166 indicateOptimisticFixpoint(); 3167 } 3168 3169 /// See AbstractAttribute::update(...). 3170 ChangeStatus updateImpl(Attributor &A) override { 3171 // We have to make sure no-alias on the argument does not break 3172 // synchronization when this is a callback argument, see also [1] below. 3173 // If synchronization cannot be affected, we delegate to the base updateImpl 3174 // function, otherwise we give up for now. 3175 3176 // If the function is no-sync, no-alias cannot break synchronization. 3177 const auto &NoSyncAA = 3178 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), 3179 DepClassTy::OPTIONAL); 3180 if (NoSyncAA.isAssumedNoSync()) 3181 return Base::updateImpl(A); 3182 3183 // If the argument is read-only, no-alias cannot break synchronization. 3184 bool IsKnown; 3185 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 3186 return Base::updateImpl(A); 3187 3188 // If the argument is never passed through callbacks, no-alias cannot break 3189 // synchronization. 3190 bool UsedAssumedInformation = false; 3191 if (A.checkForAllCallSites( 3192 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 3193 true, UsedAssumedInformation)) 3194 return Base::updateImpl(A); 3195 3196 // TODO: add no-alias but make sure it doesn't break synchronization by 3197 // introducing fake uses. See: 3198 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. 
Finkel, 3199 // International Workshop on OpenMP 2018, 3200 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 3201 3202 return indicatePessimisticFixpoint(); 3203 } 3204 3205 /// See AbstractAttribute::trackStatistics() 3206 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 3207 }; 3208 3209 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 3210 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 3211 : AANoAliasImpl(IRP, A) {} 3212 3213 /// See AbstractAttribute::initialize(...). 3214 void initialize(Attributor &A) override { 3215 // See callsite argument attribute and callee argument attribute. 3216 const auto &CB = cast<CallBase>(getAnchorValue()); 3217 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias)) 3218 indicateOptimisticFixpoint(); 3219 Value &Val = getAssociatedValue(); 3220 if (isa<ConstantPointerNull>(Val) && 3221 !NullPointerIsDefined(getAnchorScope(), 3222 Val.getType()->getPointerAddressSpace())) 3223 indicateOptimisticFixpoint(); 3224 } 3225 3226 /// Determine if the underlying value may alias with the call site argument 3227 /// \p OtherArgNo of \p ICS (= the underlying call site). 3228 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 3229 const AAMemoryBehavior &MemBehaviorAA, 3230 const CallBase &CB, unsigned OtherArgNo) { 3231 // We do not need to worry about aliasing with the underlying IRP. 3232 if (this->getCalleeArgNo() == (int)OtherArgNo) 3233 return false; 3234 3235 // If it is not a pointer or pointer vector we do not alias. 3236 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 3237 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 3238 return false; 3239 3240 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 3241 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE); 3242 3243 // If the argument is readnone, there is no read-write aliasing. 3244 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 3245 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3246 return false; 3247 } 3248 3249 // If the argument is readonly and the underlying value is readonly, there 3250 // is no read-write aliasing. 3251 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 3252 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 3253 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3254 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3255 return false; 3256 } 3257 3258 // We have to utilize actual alias analysis queries so we need the object. 3259 if (!AAR) 3260 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 3261 3262 // Try to rule it out at the call site. 3263 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 3264 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 3265 "callsite arguments: " 3266 << getAssociatedValue() << " " << *ArgOp << " => " 3267 << (IsAliasing ? "" : "no-") << "alias \n"); 3268 3269 return IsAliasing; 3270 } 3271 3272 bool 3273 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 3274 const AAMemoryBehavior &MemBehaviorAA, 3275 const AANoAlias &NoAliasAA) { 3276 // We can deduce "noalias" if the following conditions hold. 3277 // (i) Associated value is assumed to be noalias in the definition. 3278 // (ii) Associated value is assumed to be no-capture in all the uses 3279 // possibly executed before this callsite. 3280 // (iii) There is no other pointer argument which could alias with the 3281 // value. 
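    // Illustrative sketch (hypothetical IR) where all three conditions can
    // hold for the pointer argument of the second call:
    //
    //   %m = call noalias i8* @malloc(i64 8)
    //   call void @use(i8* nocapture %m)     ; no other pointer argument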
3282 3283 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 3284 if (!AssociatedValueIsNoAliasAtDef) { 3285 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 3286 << " is not no-alias at the definition\n"); 3287 return false; 3288 } 3289 3290 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 3291 3292 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3293 const Function *ScopeFn = VIRP.getAnchorScope(); 3294 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); 3295 // Check whether the value is captured in the scope using AANoCapture. 3296 // Look at CFG and check only uses possibly executed before this 3297 // callsite. 3298 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 3299 Instruction *UserI = cast<Instruction>(U.getUser()); 3300 3301 // If UserI is the curr instruction and there is a single potential use of 3302 // the value in UserI we allow the use. 3303 // TODO: We should inspect the operands and allow those that cannot alias 3304 // with the value. 3305 if (UserI == getCtxI() && UserI->getNumOperands() == 1) 3306 return true; 3307 3308 if (ScopeFn) { 3309 const auto &ReachabilityAA = A.getAAFor<AAReachability>( 3310 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL); 3311 3312 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 3313 return true; 3314 3315 if (auto *CB = dyn_cast<CallBase>(UserI)) { 3316 if (CB->isArgOperand(&U)) { 3317 3318 unsigned ArgNo = CB->getArgOperandNo(&U); 3319 3320 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 3321 *this, IRPosition::callsite_argument(*CB, ArgNo), 3322 DepClassTy::OPTIONAL); 3323 3324 if (NoCaptureAA.isAssumedNoCapture()) 3325 return true; 3326 } 3327 } 3328 } 3329 3330 // For cases which can potentially have more users 3331 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 3332 isa<SelectInst>(U)) { 3333 Follow = true; 3334 return true; 3335 } 3336 3337 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 3338 return false; 3339 }; 3340 3341 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 3342 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 3343 LLVM_DEBUG( 3344 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 3345 << " cannot be noalias as it is potentially captured\n"); 3346 return false; 3347 } 3348 } 3349 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 3350 3351 // Check there is no other pointer argument which could alias with the 3352 // value passed at this call site. 3353 // TODO: AbstractCallSite 3354 const auto &CB = cast<CallBase>(getAnchorValue()); 3355 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) 3356 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 3357 return false; 3358 3359 return true; 3360 } 3361 3362 /// See AbstractAttribute::updateImpl(...). 3363 ChangeStatus updateImpl(Attributor &A) override { 3364 // If the argument is readnone we are done as there are no accesses via the 3365 // argument. 
3366 auto &MemBehaviorAA = 3367 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 3368 if (MemBehaviorAA.isAssumedReadNone()) { 3369 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3370 return ChangeStatus::UNCHANGED; 3371 } 3372 3373 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3374 const auto &NoAliasAA = 3375 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); 3376 3377 AAResults *AAR = nullptr; 3378 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 3379 NoAliasAA)) { 3380 LLVM_DEBUG( 3381 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 3382 return ChangeStatus::UNCHANGED; 3383 } 3384 3385 return indicatePessimisticFixpoint(); 3386 } 3387 3388 /// See AbstractAttribute::trackStatistics() 3389 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 3390 }; 3391 3392 /// NoAlias attribute for function return value. 3393 struct AANoAliasReturned final : AANoAliasImpl { 3394 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 3395 : AANoAliasImpl(IRP, A) {} 3396 3397 /// See AbstractAttribute::initialize(...). 3398 void initialize(Attributor &A) override { 3399 AANoAliasImpl::initialize(A); 3400 Function *F = getAssociatedFunction(); 3401 if (!F || F->isDeclaration()) 3402 indicatePessimisticFixpoint(); 3403 } 3404 3405 /// See AbstractAttribute::updateImpl(...). 3406 virtual ChangeStatus updateImpl(Attributor &A) override { 3407 3408 auto CheckReturnValue = [&](Value &RV) -> bool { 3409 if (Constant *C = dyn_cast<Constant>(&RV)) 3410 if (C->isNullValue() || isa<UndefValue>(C)) 3411 return true; 3412 3413 /// For now, we can only deduce noalias if we have call sites. 3414 /// FIXME: add more support. 3415 if (!isa<CallBase>(&RV)) 3416 return false; 3417 3418 const IRPosition &RVPos = IRPosition::value(RV); 3419 const auto &NoAliasAA = 3420 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); 3421 if (!NoAliasAA.isAssumedNoAlias()) 3422 return false; 3423 3424 const auto &NoCaptureAA = 3425 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); 3426 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 3427 }; 3428 3429 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 3430 return indicatePessimisticFixpoint(); 3431 3432 return ChangeStatus::UNCHANGED; 3433 } 3434 3435 /// See AbstractAttribute::trackStatistics() 3436 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 3437 }; 3438 3439 /// NoAlias attribute deduction for a call site return value. 3440 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 3441 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 3442 : AANoAliasImpl(IRP, A) {} 3443 3444 /// See AbstractAttribute::initialize(...). 3445 void initialize(Attributor &A) override { 3446 AANoAliasImpl::initialize(A); 3447 Function *F = getAssociatedFunction(); 3448 if (!F || F->isDeclaration()) 3449 indicatePessimisticFixpoint(); 3450 } 3451 3452 /// See AbstractAttribute::updateImpl(...). 3453 ChangeStatus updateImpl(Attributor &A) override { 3454 // TODO: Once we have call site specific value information we can provide 3455 // call site specific liveness information and then it makes 3456 // sense to specialize attributes for call sites arguments instead of 3457 // redirecting requests to the callee argument. 
3458 Function *F = getAssociatedFunction(); 3459 const IRPosition &FnPos = IRPosition::returned(*F); 3460 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3461 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3462 } 3463 3464 /// See AbstractAttribute::trackStatistics() 3465 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3466 }; 3467 } // namespace 3468 3469 /// -------------------AAIsDead Function Attribute----------------------- 3470 3471 namespace { 3472 struct AAIsDeadValueImpl : public AAIsDead { 3473 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3474 3475 /// See AbstractAttribute::initialize(...). 3476 void initialize(Attributor &A) override { 3477 if (auto *Scope = getAnchorScope()) 3478 if (!A.isRunOn(*Scope)) 3479 indicatePessimisticFixpoint(); 3480 } 3481 3482 /// See AAIsDead::isAssumedDead(). 3483 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3484 3485 /// See AAIsDead::isKnownDead(). 3486 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3487 3488 /// See AAIsDead::isAssumedDead(BasicBlock *). 3489 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3490 3491 /// See AAIsDead::isKnownDead(BasicBlock *). 3492 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3493 3494 /// See AAIsDead::isAssumedDead(Instruction *I). 3495 bool isAssumedDead(const Instruction *I) const override { 3496 return I == getCtxI() && isAssumedDead(); 3497 } 3498 3499 /// See AAIsDead::isKnownDead(Instruction *I). 3500 bool isKnownDead(const Instruction *I) const override { 3501 return isAssumedDead(I) && isKnownDead(); 3502 } 3503 3504 /// See AbstractAttribute::getAsStr(). 3505 virtual const std::string getAsStr() const override { 3506 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3507 } 3508 3509 /// Check if all uses are assumed dead. 3510 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3511 // Callers might not check the type, void has no uses. 3512 if (V.getType()->isVoidTy() || V.use_empty()) 3513 return true; 3514 3515 // If we replace a value with a constant there are no uses left afterwards. 3516 if (!isa<Constant>(V)) { 3517 if (auto *I = dyn_cast<Instruction>(&V)) 3518 if (!A.isRunOn(*I->getFunction())) 3519 return false; 3520 bool UsedAssumedInformation = false; 3521 Optional<Constant *> C = 3522 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3523 if (!C.hasValue() || *C) 3524 return true; 3525 } 3526 3527 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3528 // Explicitly set the dependence class to required because we want a long 3529 // chain of N dependent instructions to be considered live as soon as one is 3530 // without going through N update cycles. This is not required for 3531 // correctness. 3532 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3533 DepClassTy::REQUIRED); 3534 } 3535 3536 /// Determine if \p I is assumed to be side-effect free. 
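  // Rough illustration (assumed callee attributes, not exhaustive): a call
  // such as `%n = call i64 @strlen(i8* %s)` whose callee is nounwind and only
  // reads memory is treated as side-effect free below; a call that may write
  // memory or unwind is not.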
3537 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 3538 if (!I || wouldInstructionBeTriviallyDead(I)) 3539 return true; 3540 3541 auto *CB = dyn_cast<CallBase>(I); 3542 if (!CB || isa<IntrinsicInst>(CB)) 3543 return false; 3544 3545 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 3546 const auto &NoUnwindAA = 3547 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE); 3548 if (!NoUnwindAA.isAssumedNoUnwind()) 3549 return false; 3550 if (!NoUnwindAA.isKnownNoUnwind()) 3551 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 3552 3553 bool IsKnown; 3554 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown); 3555 } 3556 }; 3557 3558 struct AAIsDeadFloating : public AAIsDeadValueImpl { 3559 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 3560 : AAIsDeadValueImpl(IRP, A) {} 3561 3562 /// See AbstractAttribute::initialize(...). 3563 void initialize(Attributor &A) override { 3564 AAIsDeadValueImpl::initialize(A); 3565 3566 if (isa<UndefValue>(getAssociatedValue())) { 3567 indicatePessimisticFixpoint(); 3568 return; 3569 } 3570 3571 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3572 if (!isAssumedSideEffectFree(A, I)) { 3573 if (!isa_and_nonnull<StoreInst>(I)) 3574 indicatePessimisticFixpoint(); 3575 else 3576 removeAssumedBits(HAS_NO_EFFECT); 3577 } 3578 } 3579 3580 bool isDeadStore(Attributor &A, StoreInst &SI) { 3581 // Lang ref now states volatile store is not UB/dead, let's skip them. 3582 if (SI.isVolatile()) 3583 return false; 3584 3585 bool UsedAssumedInformation = false; 3586 SmallSetVector<Value *, 4> PotentialCopies; 3587 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this, 3588 UsedAssumedInformation)) 3589 return false; 3590 return llvm::all_of(PotentialCopies, [&](Value *V) { 3591 return A.isAssumedDead(IRPosition::value(*V), this, nullptr, 3592 UsedAssumedInformation); 3593 }); 3594 } 3595 3596 /// See AbstractAttribute::getAsStr(). 3597 const std::string getAsStr() const override { 3598 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3599 if (isa_and_nonnull<StoreInst>(I)) 3600 if (isValidState()) 3601 return "assumed-dead-store"; 3602 return AAIsDeadValueImpl::getAsStr(); 3603 } 3604 3605 /// See AbstractAttribute::updateImpl(...). 3606 ChangeStatus updateImpl(Attributor &A) override { 3607 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3608 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3609 if (!isDeadStore(A, *SI)) 3610 return indicatePessimisticFixpoint(); 3611 } else { 3612 if (!isAssumedSideEffectFree(A, I)) 3613 return indicatePessimisticFixpoint(); 3614 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3615 return indicatePessimisticFixpoint(); 3616 } 3617 return ChangeStatus::UNCHANGED; 3618 } 3619 3620 /// See AbstractAttribute::manifest(...). 3621 ChangeStatus manifest(Attributor &A) override { 3622 Value &V = getAssociatedValue(); 3623 if (auto *I = dyn_cast<Instruction>(&V)) { 3624 // If we get here we basically know the users are all dead. We check if 3625 // isAssumedSideEffectFree returns true here again because it might not be 3626 // the case and only the users are dead but the instruction (=call) is 3627 // still needed. 
3628 if (isa<StoreInst>(I) || 3629 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) { 3630 A.deleteAfterManifest(*I); 3631 return ChangeStatus::CHANGED; 3632 } 3633 } 3634 return ChangeStatus::UNCHANGED; 3635 } 3636 3637 /// See AbstractAttribute::trackStatistics() 3638 void trackStatistics() const override { 3639 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 3640 } 3641 }; 3642 3643 struct AAIsDeadArgument : public AAIsDeadFloating { 3644 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 3645 : AAIsDeadFloating(IRP, A) {} 3646 3647 /// See AbstractAttribute::initialize(...). 3648 void initialize(Attributor &A) override { 3649 AAIsDeadFloating::initialize(A); 3650 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 3651 indicatePessimisticFixpoint(); 3652 } 3653 3654 /// See AbstractAttribute::manifest(...). 3655 ChangeStatus manifest(Attributor &A) override { 3656 Argument &Arg = *getAssociatedArgument(); 3657 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 3658 if (A.registerFunctionSignatureRewrite( 3659 Arg, /* ReplacementTypes */ {}, 3660 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 3661 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 3662 return ChangeStatus::CHANGED; 3663 } 3664 return ChangeStatus::UNCHANGED; 3665 } 3666 3667 /// See AbstractAttribute::trackStatistics() 3668 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 3669 }; 3670 3671 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 3672 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 3673 : AAIsDeadValueImpl(IRP, A) {} 3674 3675 /// See AbstractAttribute::initialize(...). 3676 void initialize(Attributor &A) override { 3677 AAIsDeadValueImpl::initialize(A); 3678 if (isa<UndefValue>(getAssociatedValue())) 3679 indicatePessimisticFixpoint(); 3680 } 3681 3682 /// See AbstractAttribute::updateImpl(...). 3683 ChangeStatus updateImpl(Attributor &A) override { 3684 // TODO: Once we have call site specific value information we can provide 3685 // call site specific liveness information and then it makes 3686 // sense to specialize attributes for call sites arguments instead of 3687 // redirecting requests to the callee argument. 3688 Argument *Arg = getAssociatedArgument(); 3689 if (!Arg) 3690 return indicatePessimisticFixpoint(); 3691 const IRPosition &ArgPos = IRPosition::argument(*Arg); 3692 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED); 3693 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 3694 } 3695 3696 /// See AbstractAttribute::manifest(...). 3697 ChangeStatus manifest(Attributor &A) override { 3698 CallBase &CB = cast<CallBase>(getAnchorValue()); 3699 Use &U = CB.getArgOperandUse(getCallSiteArgNo()); 3700 assert(!isa<UndefValue>(U.get()) && 3701 "Expected undef values to be filtered out!"); 3702 UndefValue &UV = *UndefValue::get(U->getType()); 3703 if (A.changeUseAfterManifest(U, UV)) 3704 return ChangeStatus::CHANGED; 3705 return ChangeStatus::UNCHANGED; 3706 } 3707 3708 /// See AbstractAttribute::trackStatistics() 3709 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 3710 }; 3711 3712 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 3713 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 3714 : AAIsDeadFloating(IRP, A) {} 3715 3716 /// See AAIsDead::isAssumedDead(). 
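  // Illustrative sketch (hypothetical IR): a call site return value with no
  // uses whose call is side-effect free is assumed dead here, and the call
  // itself may later be deleted, e.g.:
  //
  //   %unused = call i64 @strlen(i8* %s)     ; readonly + nounwind callee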
3717 bool isAssumedDead() const override { 3718 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3719 } 3720 3721 /// See AbstractAttribute::initialize(...). 3722 void initialize(Attributor &A) override { 3723 AAIsDeadFloating::initialize(A); 3724 if (isa<UndefValue>(getAssociatedValue())) { 3725 indicatePessimisticFixpoint(); 3726 return; 3727 } 3728 3729 // We track this separately as a secondary state. 3730 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3731 } 3732 3733 /// See AbstractAttribute::updateImpl(...). 3734 ChangeStatus updateImpl(Attributor &A) override { 3735 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3736 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3737 IsAssumedSideEffectFree = false; 3738 Changed = ChangeStatus::CHANGED; 3739 } 3740 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3741 return indicatePessimisticFixpoint(); 3742 return Changed; 3743 } 3744 3745 /// See AbstractAttribute::trackStatistics() 3746 void trackStatistics() const override { 3747 if (IsAssumedSideEffectFree) 3748 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3749 else 3750 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3751 } 3752 3753 /// See AbstractAttribute::getAsStr(). 3754 const std::string getAsStr() const override { 3755 return isAssumedDead() 3756 ? "assumed-dead" 3757 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3758 } 3759 3760 private: 3761 bool IsAssumedSideEffectFree = true; 3762 }; 3763 3764 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3765 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3766 : AAIsDeadValueImpl(IRP, A) {} 3767 3768 /// See AbstractAttribute::updateImpl(...). 3769 ChangeStatus updateImpl(Attributor &A) override { 3770 3771 bool UsedAssumedInformation = false; 3772 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3773 {Instruction::Ret}, UsedAssumedInformation); 3774 3775 auto PredForCallSite = [&](AbstractCallSite ACS) { 3776 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3777 return false; 3778 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3779 }; 3780 3781 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3782 UsedAssumedInformation)) 3783 return indicatePessimisticFixpoint(); 3784 3785 return ChangeStatus::UNCHANGED; 3786 } 3787 3788 /// See AbstractAttribute::manifest(...). 3789 ChangeStatus manifest(Attributor &A) override { 3790 // TODO: Rewrite the signature to return void? 3791 bool AnyChange = false; 3792 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3793 auto RetInstPred = [&](Instruction &I) { 3794 ReturnInst &RI = cast<ReturnInst>(I); 3795 if (!isa<UndefValue>(RI.getReturnValue())) 3796 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3797 return true; 3798 }; 3799 bool UsedAssumedInformation = false; 3800 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3801 UsedAssumedInformation); 3802 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3803 } 3804 3805 /// See AbstractAttribute::trackStatistics() 3806 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3807 }; 3808 3809 struct AAIsDeadFunction : public AAIsDead { 3810 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3811 3812 /// See AbstractAttribute::initialize(...). 
3813 void initialize(Attributor &A) override { 3814 Function *F = getAnchorScope(); 3815 if (!F || F->isDeclaration() || !A.isRunOn(*F)) { 3816 indicatePessimisticFixpoint(); 3817 return; 3818 } 3819 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3820 assumeLive(A, F->getEntryBlock()); 3821 } 3822 3823 /// See AbstractAttribute::getAsStr(). 3824 const std::string getAsStr() const override { 3825 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3826 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3827 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3828 std::to_string(KnownDeadEnds.size()) + "]"; 3829 } 3830 3831 /// See AbstractAttribute::manifest(...). 3832 ChangeStatus manifest(Attributor &A) override { 3833 assert(getState().isValidState() && 3834 "Attempted to manifest an invalid state!"); 3835 3836 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3837 Function &F = *getAnchorScope(); 3838 3839 if (AssumedLiveBlocks.empty()) { 3840 A.deleteAfterManifest(F); 3841 return ChangeStatus::CHANGED; 3842 } 3843 3844 // Flag to determine if we can change an invoke to a call assuming the 3845 // callee is nounwind. This is not possible if the personality of the 3846 // function allows to catch asynchronous exceptions. 3847 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3848 3849 KnownDeadEnds.set_union(ToBeExploredFrom); 3850 for (const Instruction *DeadEndI : KnownDeadEnds) { 3851 auto *CB = dyn_cast<CallBase>(DeadEndI); 3852 if (!CB) 3853 continue; 3854 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3855 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3856 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3857 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3858 continue; 3859 3860 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3861 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3862 else 3863 A.changeToUnreachableAfterManifest( 3864 const_cast<Instruction *>(DeadEndI->getNextNode())); 3865 HasChanged = ChangeStatus::CHANGED; 3866 } 3867 3868 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3869 for (BasicBlock &BB : F) 3870 if (!AssumedLiveBlocks.count(&BB)) { 3871 A.deleteAfterManifest(BB); 3872 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3873 HasChanged = ChangeStatus::CHANGED; 3874 } 3875 3876 return HasChanged; 3877 } 3878 3879 /// See AbstractAttribute::updateImpl(...). 3880 ChangeStatus updateImpl(Attributor &A) override; 3881 3882 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3883 assert(From->getParent() == getAnchorScope() && 3884 To->getParent() == getAnchorScope() && 3885 "Used AAIsDead of the wrong function"); 3886 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To)); 3887 } 3888 3889 /// See AbstractAttribute::trackStatistics() 3890 void trackStatistics() const override {} 3891 3892 /// Returns true if the function is assumed dead. 3893 bool isAssumedDead() const override { return false; } 3894 3895 /// See AAIsDead::isKnownDead(). 3896 bool isKnownDead() const override { return false; } 3897 3898 /// See AAIsDead::isAssumedDead(BasicBlock *). 3899 bool isAssumedDead(const BasicBlock *BB) const override { 3900 assert(BB->getParent() == getAnchorScope() && 3901 "BB must be in the same anchor scope function."); 3902 3903 if (!getAssumed()) 3904 return false; 3905 return !AssumedLiveBlocks.count(BB); 3906 } 3907 3908 /// See AAIsDead::isKnownDead(BasicBlock *). 
3909   bool isKnownDead(const BasicBlock *BB) const override {
3910     return getKnown() && isAssumedDead(BB);
3911   }
3912 
3913   /// See AAIsDead::isAssumedDead(Instruction *I).
3914   bool isAssumedDead(const Instruction *I) const override {
3915     assert(I->getParent()->getParent() == getAnchorScope() &&
3916            "Instruction must be in the same anchor scope function.");
3917 
3918     if (!getAssumed())
3919       return false;
3920 
3921     // If it is not in AssumedLiveBlocks then it is dead for sure.
3922     // Otherwise, it can still be after a noreturn call in a live block.
3923     if (!AssumedLiveBlocks.count(I->getParent()))
3924       return true;
3925 
3926     // If it is not after a liveness barrier it is live.
3927     const Instruction *PrevI = I->getPrevNode();
3928     while (PrevI) {
3929       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3930         return true;
3931       PrevI = PrevI->getPrevNode();
3932     }
3933     return false;
3934   }
3935 
3936   /// See AAIsDead::isKnownDead(Instruction *I).
3937   bool isKnownDead(const Instruction *I) const override {
3938     return getKnown() && isAssumedDead(I);
3939   }
3940 
3941   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3942   /// that internal functions called from \p BB should now be looked at.
3943   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3944     if (!AssumedLiveBlocks.insert(&BB).second)
3945       return false;
3946 
3947     // We assume that all of BB is (probably) live now and if there are calls to
3948     // internal functions we will assume that those are now live as well. This
3949     // is a performance optimization for blocks with calls to a lot of internal
3950     // functions. It can however cause dead functions to be treated as live.
3951     for (const Instruction &I : BB)
3952       if (const auto *CB = dyn_cast<CallBase>(&I))
3953         if (const Function *F = CB->getCalledFunction())
3954           if (F->hasLocalLinkage())
3955             A.markLiveInternalFunction(*F);
3956     return true;
3957   }
3958 
3959   /// Collection of instructions that need to be explored again, e.g., we
3960   /// did assume they do not transfer control to (one of their) successors.
3961   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3962 
3963   /// Collection of instructions that are known to not transfer control.
3964   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3965 
3966   /// Collection of all assumed live edges.
3967   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3968 
3969   /// Collection of all assumed live BasicBlocks.
3970   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3971 };
3972 
3973 static bool
3974 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3975                         AbstractAttribute &AA,
3976                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3977   const IRPosition &IPos = IRPosition::callsite_function(CB);
3978 
3979   const auto &NoReturnAA =
3980       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3981   if (NoReturnAA.isAssumedNoReturn())
3982     return !NoReturnAA.isKnownNoReturn();
3983   if (CB.isTerminator())
3984     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3985   else
3986     AliveSuccessors.push_back(CB.getNextNode());
3987   return false;
3988 }
3989 
3990 static bool
3991 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3992                         AbstractAttribute &AA,
3993                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3994   bool UsedAssumedInformation =
3995       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3996 
3997   // First, determine if we can change an invoke to a call assuming the
3998   // callee is nounwind.
This is not possible if the personality of the 3999 // function allows to catch asynchronous exceptions. 4000 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 4001 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 4002 } else { 4003 const IRPosition &IPos = IRPosition::callsite_function(II); 4004 const auto &AANoUnw = 4005 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); 4006 if (AANoUnw.isAssumedNoUnwind()) { 4007 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 4008 } else { 4009 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 4010 } 4011 } 4012 return UsedAssumedInformation; 4013 } 4014 4015 static bool 4016 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 4017 AbstractAttribute &AA, 4018 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 4019 bool UsedAssumedInformation = false; 4020 if (BI.getNumSuccessors() == 1) { 4021 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 4022 } else { 4023 Optional<Constant *> C = 4024 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); 4025 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 4026 // No value yet, assume both edges are dead. 4027 } else if (isa_and_nonnull<ConstantInt>(*C)) { 4028 const BasicBlock *SuccBB = 4029 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); 4030 AliveSuccessors.push_back(&SuccBB->front()); 4031 } else { 4032 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 4033 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 4034 UsedAssumedInformation = false; 4035 } 4036 } 4037 return UsedAssumedInformation; 4038 } 4039 4040 static bool 4041 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 4042 AbstractAttribute &AA, 4043 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 4044 bool UsedAssumedInformation = false; 4045 Optional<Constant *> C = 4046 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); 4047 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 4048 // No value yet, assume all edges are dead. 4049 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) { 4050 for (auto &CaseIt : SI.cases()) { 4051 if (CaseIt.getCaseValue() == C.getValue()) { 4052 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 4053 return UsedAssumedInformation; 4054 } 4055 } 4056 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 4057 return UsedAssumedInformation; 4058 } else { 4059 for (const BasicBlock *SuccBB : successors(SI.getParent())) 4060 AliveSuccessors.push_back(&SuccBB->front()); 4061 } 4062 return UsedAssumedInformation; 4063 } 4064 4065 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 4066 ChangeStatus Change = ChangeStatus::UNCHANGED; 4067 4068 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 4069 << getAnchorScope()->size() << "] BBs and " 4070 << ToBeExploredFrom.size() << " exploration points and " 4071 << KnownDeadEnds.size() << " known dead ends\n"); 4072 4073 // Copy and clear the list of instructions we need to explore from. It is 4074 // refilled with instructions the next update has to look at. 
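  // Illustrative sketch (hypothetical IR, simplified): a call that is assumed
  // `noreturn` ends exploration of its block, so the instructions after it and
  // successors reached only through it stay out of AssumedLiveBlocks:
  //
  //   entry:
  //     call void @abort()   ; assumed noreturn
  //     br label %next       ; never explored, %next remains assumed dead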
4075   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
4076                                                ToBeExploredFrom.end());
4077   decltype(ToBeExploredFrom) NewToBeExploredFrom;
4078 
4079   SmallVector<const Instruction *, 8> AliveSuccessors;
4080   while (!Worklist.empty()) {
4081     const Instruction *I = Worklist.pop_back_val();
4082     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
4083 
4084     // Fast forward over uninteresting instructions. We could look for UB here
4085     // though.
4086     while (!I->isTerminator() && !isa<CallBase>(I))
4087       I = I->getNextNode();
4088 
4089     AliveSuccessors.clear();
4090 
4091     bool UsedAssumedInformation = false;
4092     switch (I->getOpcode()) {
4093     // TODO: look for (assumed) UB to backwards propagate "deadness".
4094     default:
4095       assert(I->isTerminator() &&
4096              "Expected non-terminators to be handled already!");
4097       for (const BasicBlock *SuccBB : successors(I->getParent()))
4098         AliveSuccessors.push_back(&SuccBB->front());
4099       break;
4100     case Instruction::Call:
4101       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4102                                                        *this, AliveSuccessors);
4103       break;
4104     case Instruction::Invoke:
4105       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4106                                                        *this, AliveSuccessors);
4107       break;
4108     case Instruction::Br:
4109       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4110                                                        *this, AliveSuccessors);
4111       break;
4112     case Instruction::Switch:
4113       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4114                                                        *this, AliveSuccessors);
4115       break;
4116     }
4117 
4118     if (UsedAssumedInformation) {
4119       NewToBeExploredFrom.insert(I);
4120     } else if (AliveSuccessors.empty() ||
4121                (I->isTerminator() &&
4122                 AliveSuccessors.size() < I->getNumSuccessors())) {
4123       if (KnownDeadEnds.insert(I))
4124         Change = ChangeStatus::CHANGED;
4125     }
4126 
4127     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4128                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4129                       << UsedAssumedInformation << "\n");
4130 
4131     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4132       if (!I->isTerminator()) {
4133         assert(AliveSuccessors.size() == 1 &&
4134                "Non-terminator expected to have a single successor!");
4135         Worklist.push_back(AliveSuccessor);
4136       } else {
4137         // Record the assumed live edge.
4138         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4139         if (AssumedLiveEdges.insert(Edge).second)
4140           Change = ChangeStatus::CHANGED;
4141         if (assumeLive(A, *AliveSuccessor->getParent()))
4142           Worklist.push_back(AliveSuccessor);
4143       }
4144     }
4145   }
4146 
4147   // Check if the content of ToBeExploredFrom changed, ignore the order.
4148   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4149       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4150         return !ToBeExploredFrom.count(I);
4151       })) {
4152     Change = ChangeStatus::CHANGED;
4153     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4154   }
4155 
4156   // If we know everything is live there is no need to query for liveness.
4157   // Instead, indicating a pessimistic fixpoint will cause the state to be
4158   // "invalid" and all queries to be answered conservatively without lookups.
4159   // To be in this state we have to (1) have finished the exploration, (2) not
4160   // have discovered any non-trivial dead end, and (3) not have ruled any
4161   // unreachable code dead.
4162 if (ToBeExploredFrom.empty() && 4163 getAnchorScope()->size() == AssumedLiveBlocks.size() && 4164 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) { 4165 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0; 4166 })) 4167 return indicatePessimisticFixpoint(); 4168 return Change; 4169 } 4170 4171 /// Liveness information for a call sites. 4172 struct AAIsDeadCallSite final : AAIsDeadFunction { 4173 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) 4174 : AAIsDeadFunction(IRP, A) {} 4175 4176 /// See AbstractAttribute::initialize(...). 4177 void initialize(Attributor &A) override { 4178 // TODO: Once we have call site specific value information we can provide 4179 // call site specific liveness information and then it makes 4180 // sense to specialize attributes for call sites instead of 4181 // redirecting requests to the callee. 4182 llvm_unreachable("Abstract attributes for liveness are not " 4183 "supported for call sites yet!"); 4184 } 4185 4186 /// See AbstractAttribute::updateImpl(...). 4187 ChangeStatus updateImpl(Attributor &A) override { 4188 return indicatePessimisticFixpoint(); 4189 } 4190 4191 /// See AbstractAttribute::trackStatistics() 4192 void trackStatistics() const override {} 4193 }; 4194 } // namespace 4195 4196 /// -------------------- Dereferenceable Argument Attribute -------------------- 4197 4198 namespace { 4199 struct AADereferenceableImpl : AADereferenceable { 4200 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 4201 : AADereferenceable(IRP, A) {} 4202 using StateType = DerefState; 4203 4204 /// See AbstractAttribute::initialize(...). 4205 void initialize(Attributor &A) override { 4206 SmallVector<Attribute, 4> Attrs; 4207 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 4208 Attrs, /* IgnoreSubsumingPositions */ false, &A); 4209 for (const Attribute &Attr : Attrs) 4210 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 4211 4212 const IRPosition &IRP = this->getIRPosition(); 4213 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE); 4214 4215 bool CanBeNull, CanBeFreed; 4216 takeKnownDerefBytesMaximum( 4217 IRP.getAssociatedValue().getPointerDereferenceableBytes( 4218 A.getDataLayout(), CanBeNull, CanBeFreed)); 4219 4220 bool IsFnInterface = IRP.isFnInterfaceKind(); 4221 Function *FnScope = IRP.getAnchorScope(); 4222 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 4223 indicatePessimisticFixpoint(); 4224 return; 4225 } 4226 4227 if (Instruction *CtxI = getCtxI()) 4228 followUsesInMBEC(*this, A, getState(), *CtxI); 4229 } 4230 4231 /// See AbstractAttribute::getState() 4232 /// { 4233 StateType &getState() override { return *this; } 4234 const StateType &getState() const override { return *this; } 4235 /// } 4236 4237 /// Helper function for collecting accessed bytes in must-be-executed-context 4238 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 4239 DerefState &State) { 4240 const Value *UseV = U->get(); 4241 if (!UseV->getType()->isPointerTy()) 4242 return; 4243 4244 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I); 4245 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) 4246 return; 4247 4248 int64_t Offset; 4249 const Value *Base = GetPointerBaseWithConstantOffset( 4250 Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true); 4251 if (Base && Base == &getAssociatedValue()) 4252 State.addAccessedBytes(Offset, Loc->Size.getValue()); 4253 } 4254 4255 /// See followUsesInMBEC 4256 
bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4257 AADereferenceable::StateType &State) { 4258 bool IsNonNull = false; 4259 bool TrackUse = false; 4260 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4261 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4262 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4263 << " for instruction " << *I << "\n"); 4264 4265 addAccessedBytesForUse(A, U, I, State); 4266 State.takeKnownDerefBytesMaximum(DerefBytes); 4267 return TrackUse; 4268 } 4269 4270 /// See AbstractAttribute::manifest(...). 4271 ChangeStatus manifest(Attributor &A) override { 4272 ChangeStatus Change = AADereferenceable::manifest(A); 4273 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 4274 removeAttrs({Attribute::DereferenceableOrNull}); 4275 return ChangeStatus::CHANGED; 4276 } 4277 return Change; 4278 } 4279 4280 void getDeducedAttributes(LLVMContext &Ctx, 4281 SmallVectorImpl<Attribute> &Attrs) const override { 4282 // TODO: Add *_globally support 4283 if (isAssumedNonNull()) 4284 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 4285 Ctx, getAssumedDereferenceableBytes())); 4286 else 4287 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 4288 Ctx, getAssumedDereferenceableBytes())); 4289 } 4290 4291 /// See AbstractAttribute::getAsStr(). 4292 const std::string getAsStr() const override { 4293 if (!getAssumedDereferenceableBytes()) 4294 return "unknown-dereferenceable"; 4295 return std::string("dereferenceable") + 4296 (isAssumedNonNull() ? "" : "_or_null") + 4297 (isAssumedGlobal() ? "_globally" : "") + "<" + 4298 std::to_string(getKnownDereferenceableBytes()) + "-" + 4299 std::to_string(getAssumedDereferenceableBytes()) + ">"; 4300 } 4301 }; 4302 4303 /// Dereferenceable attribute for a floating value. 4304 struct AADereferenceableFloating : AADereferenceableImpl { 4305 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 4306 : AADereferenceableImpl(IRP, A) {} 4307 4308 /// See AbstractAttribute::updateImpl(...). 4309 ChangeStatus updateImpl(Attributor &A) override { 4310 const DataLayout &DL = A.getDataLayout(); 4311 4312 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 4313 bool Stripped) -> bool { 4314 unsigned IdxWidth = 4315 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 4316 APInt Offset(IdxWidth, 0); 4317 const Value *Base = stripAndAccumulateOffsets( 4318 A, *this, &V, DL, Offset, /* GetMinOffset */ false, 4319 /* AllowNonInbounds */ true); 4320 4321 const auto &AA = A.getAAFor<AADereferenceable>( 4322 *this, IRPosition::value(*Base), DepClassTy::REQUIRED); 4323 int64_t DerefBytes = 0; 4324 if (!Stripped && this == &AA) { 4325 // Use IR information if we did not strip anything. 4326 // TODO: track globally. 4327 bool CanBeNull, CanBeFreed; 4328 DerefBytes = 4329 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 4330 T.GlobalState.indicatePessimisticFixpoint(); 4331 } else { 4332 const DerefState &DS = AA.getState(); 4333 DerefBytes = DS.DerefBytesState.getAssumed(); 4334 T.GlobalState &= DS.GlobalState; 4335 } 4336 4337 // For now we do not try to "increase" dereferenceability due to negative 4338 // indices as we first have to come up with code to deal with loops and 4339 // for overflows of the dereferenceable bytes. 
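      // Worked example (illustrative values): if the stripped base is known
      // dereferenceable(8) and the accumulated constant offset is 4, the value
      // is dereferenceable for at most 8 - 4 = 4 bytes; a negative offset is
      // clamped to 0 below instead of being used to grow the range.
      //
      //   %base = call dereferenceable(8) i8* @get()
      //   %p    = getelementptr i8, i8* %base, i64 4   ; at most 4 deref bytes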
4340       int64_t OffsetSExt = Offset.getSExtValue();
4341       if (OffsetSExt < 0)
4342         OffsetSExt = 0;
4343 
4344       T.takeAssumedDerefBytesMinimum(
4345           std::max(int64_t(0), DerefBytes - OffsetSExt));
4346 
4347       if (this == &AA) {
4348         if (!Stripped) {
4349           // If nothing was stripped IR information is all we got.
4350           T.takeKnownDerefBytesMaximum(
4351               std::max(int64_t(0), DerefBytes - OffsetSExt));
4352           T.indicatePessimisticFixpoint();
4353         } else if (OffsetSExt > 0) {
4354           // If something was stripped but there is circular reasoning, we look
4355           // at the offset. If it is positive, we would otherwise decrease the
4356           // dereferenceable bytes in a circular loop, which would simply
4357           // drive them down to the known value in a very slow way that we
4358           // can accelerate by giving up here.
4359           T.indicatePessimisticFixpoint();
4360         }
4361       }
4362 
4363       return T.isValidState();
4364     };
4365 
4366     DerefState T;
4367     bool UsedAssumedInformation = false;
4368     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4369                                            VisitValueCB, getCtxI(),
4370                                            UsedAssumedInformation))
4371       return indicatePessimisticFixpoint();
4372 
4373     return clampStateAndIndicateChange(getState(), T);
4374   }
4375 
4376   /// See AbstractAttribute::trackStatistics()
4377   void trackStatistics() const override {
4378     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4379   }
4380 };
4381 
4382 /// Dereferenceable attribute for a return value.
4383 struct AADereferenceableReturned final
4384     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4385   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4386       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4387             IRP, A) {}
4388 
4389   /// See AbstractAttribute::trackStatistics()
4390   void trackStatistics() const override {
4391     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4392   }
4393 };
4394 
4395 /// Dereferenceable attribute for an argument.
4396 struct AADereferenceableArgument final
4397     : AAArgumentFromCallSiteArguments<AADereferenceable,
4398                                       AADereferenceableImpl> {
4399   using Base =
4400       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4401   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4402       : Base(IRP, A) {}
4403 
4404   /// See AbstractAttribute::trackStatistics()
4405   void trackStatistics() const override {
4406     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4407   }
4408 };
4409 
4410 /// Dereferenceable attribute for a call site argument.
4411 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4412   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4413       : AADereferenceableFloating(IRP, A) {}
4414 
4415   /// See AbstractAttribute::trackStatistics()
4416   void trackStatistics() const override {
4417     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4418   }
4419 };
4420 
4421 /// Dereferenceable attribute deduction for a call site return value.
4422 struct AADereferenceableCallSiteReturned final 4423 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4424 using Base = 4425 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4426 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4427 : Base(IRP, A) {} 4428 4429 /// See AbstractAttribute::trackStatistics() 4430 void trackStatistics() const override { 4431 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4432 } 4433 }; 4434 } // namespace 4435 4436 // ------------------------ Align Argument Attribute ------------------------ 4437 4438 namespace { 4439 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4440 Value &AssociatedValue, const Use *U, 4441 const Instruction *I, bool &TrackUse) { 4442 // We need to follow common pointer manipulation uses to the accesses they 4443 // feed into. 4444 if (isa<CastInst>(I)) { 4445 // Follow all but ptr2int casts. 4446 TrackUse = !isa<PtrToIntInst>(I); 4447 return 0; 4448 } 4449 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4450 if (GEP->hasAllConstantIndices()) 4451 TrackUse = true; 4452 return 0; 4453 } 4454 4455 MaybeAlign MA; 4456 if (const auto *CB = dyn_cast<CallBase>(I)) { 4457 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4458 return 0; 4459 4460 unsigned ArgNo = CB->getArgOperandNo(U); 4461 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4462 // As long as we only use known information there is no need to track 4463 // dependences here. 4464 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4465 MA = MaybeAlign(AlignAA.getKnownAlign()); 4466 } 4467 4468 const DataLayout &DL = A.getDataLayout(); 4469 const Value *UseV = U->get(); 4470 if (auto *SI = dyn_cast<StoreInst>(I)) { 4471 if (SI->getPointerOperand() == UseV) 4472 MA = SI->getAlign(); 4473 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4474 if (LI->getPointerOperand() == UseV) 4475 MA = LI->getAlign(); 4476 } 4477 4478 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4479 return 0; 4480 4481 unsigned Alignment = MA->value(); 4482 int64_t Offset; 4483 4484 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4485 if (Base == &AssociatedValue) { 4486 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4487 // So we can say that the maximum power of two which is a divisor of 4488 // gcd(Offset, Alignment) is an alignment. 4489 4490 uint32_t gcd = 4491 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4492 Alignment = llvm::PowerOf2Floor(gcd); 4493 } 4494 } 4495 4496 return Alignment; 4497 } 4498 4499 struct AAAlignImpl : AAAlign { 4500 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4501 4502 /// See AbstractAttribute::initialize(...). 4503 void initialize(Attributor &A) override { 4504 SmallVector<Attribute, 4> Attrs; 4505 getAttrs({Attribute::Alignment}, Attrs); 4506 for (const Attribute &Attr : Attrs) 4507 takeKnownMaximum(Attr.getValueAsInt()); 4508 4509 Value &V = getAssociatedValue(); 4510 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4511 4512 if (getIRPosition().isFnInterfaceKind() && 4513 (!getAnchorScope() || 4514 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4515 indicatePessimisticFixpoint(); 4516 return; 4517 } 4518 4519 if (Instruction *CtxI = getCtxI()) 4520 followUsesInMBEC(*this, A, getState(), *CtxI); 4521 } 4522 4523 /// See AbstractAttribute::manifest(...). 
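  // Illustrative sketch (hypothetical IR): besides emitting the `align`
  // attribute, manifest() below also tightens direct load/store users, e.g.
  // with an assumed alignment of 16:
  //
  //   store i32 0, i32* %p, align 4   ; becomes: store i32 0, i32* %p, align 16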
4524 ChangeStatus manifest(Attributor &A) override { 4525 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 4526 4527 // Check for users that allow alignment annotations. 4528 Value &AssociatedValue = getAssociatedValue(); 4529 for (const Use &U : AssociatedValue.uses()) { 4530 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 4531 if (SI->getPointerOperand() == &AssociatedValue) 4532 if (SI->getAlignment() < getAssumedAlign()) { 4533 STATS_DECLTRACK(AAAlign, Store, 4534 "Number of times alignment added to a store"); 4535 SI->setAlignment(Align(getAssumedAlign())); 4536 LoadStoreChanged = ChangeStatus::CHANGED; 4537 } 4538 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 4539 if (LI->getPointerOperand() == &AssociatedValue) 4540 if (LI->getAlignment() < getAssumedAlign()) { 4541 LI->setAlignment(Align(getAssumedAlign())); 4542 STATS_DECLTRACK(AAAlign, Load, 4543 "Number of times alignment added to a load"); 4544 LoadStoreChanged = ChangeStatus::CHANGED; 4545 } 4546 } 4547 } 4548 4549 ChangeStatus Changed = AAAlign::manifest(A); 4550 4551 Align InheritAlign = 4552 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4553 if (InheritAlign >= getAssumedAlign()) 4554 return LoadStoreChanged; 4555 return Changed | LoadStoreChanged; 4556 } 4557 4558 // TODO: Provide a helper to determine the implied ABI alignment and check in 4559 // the existing manifest method and a new one for AAAlignImpl that value 4560 // to avoid making the alignment explicit if it did not improve. 4561 4562 /// See AbstractAttribute::getDeducedAttributes 4563 virtual void 4564 getDeducedAttributes(LLVMContext &Ctx, 4565 SmallVectorImpl<Attribute> &Attrs) const override { 4566 if (getAssumedAlign() > 1) 4567 Attrs.emplace_back( 4568 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 4569 } 4570 4571 /// See followUsesInMBEC 4572 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4573 AAAlign::StateType &State) { 4574 bool TrackUse = false; 4575 4576 unsigned int KnownAlign = 4577 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 4578 State.takeKnownMaximum(KnownAlign); 4579 4580 return TrackUse; 4581 } 4582 4583 /// See AbstractAttribute::getAsStr(). 4584 const std::string getAsStr() const override { 4585 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 4586 "-" + std::to_string(getAssumedAlign()) + ">") 4587 : "unknown-align"; 4588 } 4589 }; 4590 4591 /// Align attribute for a floating value. 4592 struct AAAlignFloating : AAAlignImpl { 4593 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 4594 4595 /// See AbstractAttribute::updateImpl(...). 4596 ChangeStatus updateImpl(Attributor &A) override { 4597 const DataLayout &DL = A.getDataLayout(); 4598 4599 auto VisitValueCB = [&](Value &V, const Instruction *, 4600 AAAlign::StateType &T, bool Stripped) -> bool { 4601 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V)) 4602 return true; 4603 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V), 4604 DepClassTy::REQUIRED); 4605 if (!Stripped && this == &AA) { 4606 int64_t Offset; 4607 unsigned Alignment = 1; 4608 if (const Value *Base = 4609 GetPointerBaseWithConstantOffset(&V, Offset, DL)) { 4610 // TODO: Use AAAlign for the base too. 4611 Align PA = Base->getPointerAlignment(DL); 4612 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4613 // So we can say that the maximum power of two which is a divisor of 4614 // gcd(Offset, Alignment) is an alignment. 
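          // Worked example (illustrative numbers): with a base alignment of 16
          // and a constant offset of 4, gcd(4, 16) = 4, so an alignment of 4
          // is proven; with an offset of 6 it would be gcd(6, 16) = 2.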
4615 
4616           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4617                                                uint32_t(PA.value()));
4618           Alignment = llvm::PowerOf2Floor(gcd);
4619         } else {
4620           Alignment = V.getPointerAlignment(DL).value();
4621         }
4622         // Use only IR information if we did not strip anything.
4623         T.takeKnownMaximum(Alignment);
4624         T.indicatePessimisticFixpoint();
4625       } else {
4626         // Use abstract attribute information.
4627         const AAAlign::StateType &DS = AA.getState();
4628         T ^= DS;
4629       }
4630       return T.isValidState();
4631     };
4632 
4633     StateType T;
4634     bool UsedAssumedInformation = false;
4635     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4636                                           VisitValueCB, getCtxI(),
4637                                           UsedAssumedInformation))
4638       return indicatePessimisticFixpoint();
4639 
4640     // TODO: If we know we visited all incoming values and thus none are
4641     // assumed dead, we can take the known information from the state T.
4642     return clampStateAndIndicateChange(getState(), T);
4643   }
4644 
4645   /// See AbstractAttribute::trackStatistics()
4646   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4647 };
4648 
4649 /// Align attribute for function return value.
4650 struct AAAlignReturned final
4651     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4652   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4653   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4654 
4655   /// See AbstractAttribute::initialize(...).
4656   void initialize(Attributor &A) override {
4657     Base::initialize(A);
4658     Function *F = getAssociatedFunction();
4659     if (!F || F->isDeclaration())
4660       indicatePessimisticFixpoint();
4661   }
4662 
4663   /// See AbstractAttribute::trackStatistics()
4664   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4665 };
4666 
4667 /// Align attribute for function argument.
4668 struct AAAlignArgument final
4669     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4670   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4671   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4672 
4673   /// See AbstractAttribute::manifest(...).
4674   ChangeStatus manifest(Attributor &A) override {
4675     // If the associated argument is involved in a must-tail call we give up
4676     // because we would need to keep the argument alignments of caller and
4677     // callee in sync. It just does not seem worth the trouble right now.
4678     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4679       return ChangeStatus::UNCHANGED;
4680     return Base::manifest(A);
4681   }
4682 
4683   /// See AbstractAttribute::trackStatistics()
4684   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4685 };
4686 
4687 struct AAAlignCallSiteArgument final : AAAlignFloating {
4688   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4689       : AAAlignFloating(IRP, A) {}
4690 
4691   /// See AbstractAttribute::manifest(...).
4692   ChangeStatus manifest(Attributor &A) override {
4693     // If the associated argument is involved in a must-tail call we give up
4694     // because we would need to keep the argument alignments of caller and
4695     // callee in sync. It just does not seem worth the trouble right now.
4696 if (Argument *Arg = getAssociatedArgument()) 4697 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 4698 return ChangeStatus::UNCHANGED; 4699 ChangeStatus Changed = AAAlignImpl::manifest(A); 4700 Align InheritAlign = 4701 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4702 if (InheritAlign >= getAssumedAlign()) 4703 Changed = ChangeStatus::UNCHANGED; 4704 return Changed; 4705 } 4706 4707 /// See AbstractAttribute::updateImpl(Attributor &A). 4708 ChangeStatus updateImpl(Attributor &A) override { 4709 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 4710 if (Argument *Arg = getAssociatedArgument()) { 4711 // We only take known information from the argument 4712 // so we do not need to track a dependence. 4713 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 4714 *this, IRPosition::argument(*Arg), DepClassTy::NONE); 4715 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 4716 } 4717 return Changed; 4718 } 4719 4720 /// See AbstractAttribute::trackStatistics() 4721 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 4722 }; 4723 4724 /// Align attribute deduction for a call site return value. 4725 struct AAAlignCallSiteReturned final 4726 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 4727 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 4728 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 4729 : Base(IRP, A) {} 4730 4731 /// See AbstractAttribute::initialize(...). 4732 void initialize(Attributor &A) override { 4733 Base::initialize(A); 4734 Function *F = getAssociatedFunction(); 4735 if (!F || F->isDeclaration()) 4736 indicatePessimisticFixpoint(); 4737 } 4738 4739 /// See AbstractAttribute::trackStatistics() 4740 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 4741 }; 4742 } // namespace 4743 4744 /// ------------------ Function No-Return Attribute ---------------------------- 4745 namespace { 4746 struct AANoReturnImpl : public AANoReturn { 4747 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 4748 4749 /// See AbstractAttribute::initialize(...). 4750 void initialize(Attributor &A) override { 4751 AANoReturn::initialize(A); 4752 Function *F = getAssociatedFunction(); 4753 if (!F || F->isDeclaration()) 4754 indicatePessimisticFixpoint(); 4755 } 4756 4757 /// See AbstractAttribute::getAsStr(). 4758 const std::string getAsStr() const override { 4759 return getAssumed() ? "noreturn" : "may-return"; 4760 } 4761 4762 /// See AbstractAttribute::updateImpl(Attributor &A). 4763 virtual ChangeStatus updateImpl(Attributor &A) override { 4764 auto CheckForNoReturn = [](Instruction &) { return false; }; 4765 bool UsedAssumedInformation = false; 4766 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 4767 {(unsigned)Instruction::Ret}, 4768 UsedAssumedInformation)) 4769 return indicatePessimisticFixpoint(); 4770 return ChangeStatus::UNCHANGED; 4771 } 4772 }; 4773 4774 struct AANoReturnFunction final : AANoReturnImpl { 4775 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 4776 : AANoReturnImpl(IRP, A) {} 4777 4778 /// See AbstractAttribute::trackStatistics() 4779 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 4780 }; 4781 4782 /// NoReturn attribute deduction for a call sites. 4783 struct AANoReturnCallSite final : AANoReturnImpl { 4784 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 4785 : AANoReturnImpl(IRP, A) {} 4786 4787 /// See AbstractAttribute::initialize(...). 
4788 void initialize(Attributor &A) override { 4789 AANoReturnImpl::initialize(A); 4790 if (Function *F = getAssociatedFunction()) { 4791 const IRPosition &FnPos = IRPosition::function(*F); 4792 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); 4793 if (!FnAA.isAssumedNoReturn()) 4794 indicatePessimisticFixpoint(); 4795 } 4796 } 4797 4798 /// See AbstractAttribute::updateImpl(...). 4799 ChangeStatus updateImpl(Attributor &A) override { 4800 // TODO: Once we have call site specific value information we can provide 4801 // call site specific liveness information and then it makes 4802 // sense to specialize attributes for call sites arguments instead of 4803 // redirecting requests to the callee argument. 4804 Function *F = getAssociatedFunction(); 4805 const IRPosition &FnPos = IRPosition::function(*F); 4806 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); 4807 return clampStateAndIndicateChange(getState(), FnAA.getState()); 4808 } 4809 4810 /// See AbstractAttribute::trackStatistics() 4811 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } 4812 }; 4813 } // namespace 4814 4815 /// ----------------------- Variable Capturing --------------------------------- 4816 4817 namespace { 4818 /// A class to hold the state of for no-capture attributes. 4819 struct AANoCaptureImpl : public AANoCapture { 4820 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} 4821 4822 /// See AbstractAttribute::initialize(...). 4823 void initialize(Attributor &A) override { 4824 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) { 4825 indicateOptimisticFixpoint(); 4826 return; 4827 } 4828 Function *AnchorScope = getAnchorScope(); 4829 if (isFnInterfaceKind() && 4830 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) { 4831 indicatePessimisticFixpoint(); 4832 return; 4833 } 4834 4835 // You cannot "capture" null in the default address space. 4836 if (isa<ConstantPointerNull>(getAssociatedValue()) && 4837 getAssociatedValue().getType()->getPointerAddressSpace() == 0) { 4838 indicateOptimisticFixpoint(); 4839 return; 4840 } 4841 4842 const Function *F = 4843 isArgumentPosition() ? getAssociatedFunction() : AnchorScope; 4844 4845 // Check what state the associated function can actually capture. 4846 if (F) 4847 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 4848 else 4849 indicatePessimisticFixpoint(); 4850 } 4851 4852 /// See AbstractAttribute::updateImpl(...). 4853 ChangeStatus updateImpl(Attributor &A) override; 4854 4855 /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...). 4856 virtual void 4857 getDeducedAttributes(LLVMContext &Ctx, 4858 SmallVectorImpl<Attribute> &Attrs) const override { 4859 if (!isAssumedNoCaptureMaybeReturned()) 4860 return; 4861 4862 if (isArgumentPosition()) { 4863 if (isAssumedNoCapture()) 4864 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture)); 4865 else if (ManifestInternal) 4866 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned")); 4867 } 4868 } 4869 4870 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known 4871 /// depending on the ability of the function associated with \p IRP to capture 4872 /// state in memory and through "returning/throwing", respectively. 4873 static void determineFunctionCaptureCapabilities(const IRPosition &IRP, 4874 const Function &F, 4875 BitIntegerState &State) { 4876 // TODO: Once we have memory behavior attributes we should use them here. 
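    // Illustration (the example functions below are hypothetical, not from
    // this file): given
    //   static int *G;
    //   void f(int *p) { G = p; }    // may capture p in memory
    //   int *g(int *p) { return p; } // may capture p only via the return
    // a function that only reads memory, does not throw, and returns void can
    // do neither, which is what the checks below encode.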
    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw exceptions
    // and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getCalleeArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }

  /// Check the use \p U and update \p State accordingly. Return true if we
  /// should continue to update the state.
  bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
                bool &Follow) {
    Instruction *UInst = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
                      << *UInst << "\n");

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For stores we already checked if we can follow them; if they make it
    // here we give up.
    if (isa<StoreInst>(UInst))
      return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
                          /* Return */ false);

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      if (UInst->getFunction() == getAnchorScope())
        return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
4958 auto *CB = dyn_cast<CallBase>(UInst); 4959 if (!CB || !CB->isArgOperand(&U)) 4960 return isCapturedIn(State, /* Memory */ true, /* Integer */ true, 4961 /* Return */ true); 4962 4963 unsigned ArgNo = CB->getArgOperandNo(&U); 4964 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); 4965 // If we have a abstract no-capture attribute for the argument we can use 4966 // it to justify a non-capture attribute here. This allows recursion! 4967 auto &ArgNoCaptureAA = 4968 A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED); 4969 if (ArgNoCaptureAA.isAssumedNoCapture()) 4970 return isCapturedIn(State, /* Memory */ false, /* Integer */ false, 4971 /* Return */ false); 4972 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4973 Follow = true; 4974 return isCapturedIn(State, /* Memory */ false, /* Integer */ false, 4975 /* Return */ false); 4976 } 4977 4978 // Lastly, we could not find a reason no-capture can be assumed so we don't. 4979 return isCapturedIn(State, /* Memory */ true, /* Integer */ true, 4980 /* Return */ true); 4981 } 4982 4983 /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and 4984 /// \p CapturedInRet, then return true if we should continue updating the 4985 /// state. 4986 static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem, 4987 bool CapturedInInt, bool CapturedInRet) { 4988 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4989 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4990 if (CapturedInMem) 4991 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4992 if (CapturedInInt) 4993 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4994 if (CapturedInRet) 4995 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4996 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4997 } 4998 }; 4999 5000 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 5001 const IRPosition &IRP = getIRPosition(); 5002 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() 5003 : &IRP.getAssociatedValue(); 5004 if (!V) 5005 return indicatePessimisticFixpoint(); 5006 5007 const Function *F = 5008 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 5009 assert(F && "Expected a function!"); 5010 const IRPosition &FnPos = IRPosition::function(*F); 5011 5012 AANoCapture::StateType T; 5013 5014 // Readonly means we cannot capture through memory. 5015 bool IsKnown; 5016 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) { 5017 T.addKnownBits(NOT_CAPTURED_IN_MEM); 5018 if (IsKnown) 5019 addKnownBits(NOT_CAPTURED_IN_MEM); 5020 } 5021 5022 // Make sure all returned values are different than the underlying value. 5023 // TODO: we could do this in a more sophisticated way inside 5024 // AAReturnedValues, e.g., track all values that escape through returns 5025 // directly somehow. 5026 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 5027 bool SeenConstant = false; 5028 for (auto &It : RVAA.returned_values()) { 5029 if (isa<Constant>(It.first)) { 5030 if (SeenConstant) 5031 return false; 5032 SeenConstant = true; 5033 } else if (!isa<Argument>(It.first) || 5034 It.first == getAssociatedArgument()) 5035 return false; 5036 } 5037 return true; 5038 }; 5039 5040 const auto &NoUnwindAA = 5041 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL); 5042 if (NoUnwindAA.isAssumedNoUnwind()) { 5043 bool IsVoidTy = F->getReturnType()->isVoidTy(); 5044 const AAReturnedValues *RVAA = 5045 IsVoidTy ? 
nullptr 5046 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 5047 5048 DepClassTy::OPTIONAL); 5049 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 5050 T.addKnownBits(NOT_CAPTURED_IN_RET); 5051 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 5052 return ChangeStatus::UNCHANGED; 5053 if (NoUnwindAA.isKnownNoUnwind() && 5054 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 5055 addKnownBits(NOT_CAPTURED_IN_RET); 5056 if (isKnown(NOT_CAPTURED_IN_MEM)) 5057 return indicateOptimisticFixpoint(); 5058 } 5059 } 5060 } 5061 5062 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) { 5063 const auto &DerefAA = A.getAAFor<AADereferenceable>( 5064 *this, IRPosition::value(*O), DepClassTy::OPTIONAL); 5065 return DerefAA.getAssumedDereferenceableBytes(); 5066 }; 5067 5068 auto UseCheck = [&](const Use &U, bool &Follow) -> bool { 5069 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) { 5070 case UseCaptureKind::NO_CAPTURE: 5071 return true; 5072 case UseCaptureKind::MAY_CAPTURE: 5073 return checkUse(A, T, U, Follow); 5074 case UseCaptureKind::PASSTHROUGH: 5075 Follow = true; 5076 return true; 5077 } 5078 llvm_unreachable("Unexpected use capture kind!"); 5079 }; 5080 5081 if (!A.checkForAllUses(UseCheck, *this, *V)) 5082 return indicatePessimisticFixpoint(); 5083 5084 AANoCapture::StateType &S = getState(); 5085 auto Assumed = S.getAssumed(); 5086 S.intersectAssumedBits(T.getAssumed()); 5087 if (!isAssumedNoCaptureMaybeReturned()) 5088 return indicatePessimisticFixpoint(); 5089 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 5090 : ChangeStatus::CHANGED; 5091 } 5092 5093 /// NoCapture attribute for function arguments. 5094 struct AANoCaptureArgument final : AANoCaptureImpl { 5095 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 5096 : AANoCaptureImpl(IRP, A) {} 5097 5098 /// See AbstractAttribute::trackStatistics() 5099 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 5100 }; 5101 5102 /// NoCapture attribute for call site arguments. 5103 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 5104 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 5105 : AANoCaptureImpl(IRP, A) {} 5106 5107 /// See AbstractAttribute::initialize(...). 5108 void initialize(Attributor &A) override { 5109 if (Argument *Arg = getAssociatedArgument()) 5110 if (Arg->hasByValAttr()) 5111 indicateOptimisticFixpoint(); 5112 AANoCaptureImpl::initialize(A); 5113 } 5114 5115 /// See AbstractAttribute::updateImpl(...). 5116 ChangeStatus updateImpl(Attributor &A) override { 5117 // TODO: Once we have call site specific value information we can provide 5118 // call site specific liveness information and then it makes 5119 // sense to specialize attributes for call sites arguments instead of 5120 // redirecting requests to the callee argument. 5121 Argument *Arg = getAssociatedArgument(); 5122 if (!Arg) 5123 return indicatePessimisticFixpoint(); 5124 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5125 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED); 5126 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 5127 } 5128 5129 /// See AbstractAttribute::trackStatistics() 5130 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 5131 }; 5132 5133 /// NoCapture attribute for floating values. 
5134 struct AANoCaptureFloating final : AANoCaptureImpl { 5135 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 5136 : AANoCaptureImpl(IRP, A) {} 5137 5138 /// See AbstractAttribute::trackStatistics() 5139 void trackStatistics() const override { 5140 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 5141 } 5142 }; 5143 5144 /// NoCapture attribute for function return value. 5145 struct AANoCaptureReturned final : AANoCaptureImpl { 5146 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 5147 : AANoCaptureImpl(IRP, A) { 5148 llvm_unreachable("NoCapture is not applicable to function returns!"); 5149 } 5150 5151 /// See AbstractAttribute::initialize(...). 5152 void initialize(Attributor &A) override { 5153 llvm_unreachable("NoCapture is not applicable to function returns!"); 5154 } 5155 5156 /// See AbstractAttribute::updateImpl(...). 5157 ChangeStatus updateImpl(Attributor &A) override { 5158 llvm_unreachable("NoCapture is not applicable to function returns!"); 5159 } 5160 5161 /// See AbstractAttribute::trackStatistics() 5162 void trackStatistics() const override {} 5163 }; 5164 5165 /// NoCapture attribute deduction for a call site return value. 5166 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 5167 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 5168 : AANoCaptureImpl(IRP, A) {} 5169 5170 /// See AbstractAttribute::initialize(...). 5171 void initialize(Attributor &A) override { 5172 const Function *F = getAnchorScope(); 5173 // Check what state the associated function can actually capture. 5174 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 5175 } 5176 5177 /// See AbstractAttribute::trackStatistics() 5178 void trackStatistics() const override { 5179 STATS_DECLTRACK_CSRET_ATTR(nocapture) 5180 } 5181 }; 5182 } // namespace 5183 5184 /// ------------------ Value Simplify Attribute ---------------------------- 5185 5186 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) { 5187 // FIXME: Add a typecast support. 5188 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5189 SimplifiedAssociatedValue, Other, Ty); 5190 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr)) 5191 return false; 5192 5193 LLVM_DEBUG({ 5194 if (SimplifiedAssociatedValue.hasValue()) 5195 dbgs() << "[ValueSimplify] is assumed to be " 5196 << **SimplifiedAssociatedValue << "\n"; 5197 else 5198 dbgs() << "[ValueSimplify] is assumed to be <none>\n"; 5199 }); 5200 return true; 5201 } 5202 5203 namespace { 5204 struct AAValueSimplifyImpl : AAValueSimplify { 5205 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 5206 : AAValueSimplify(IRP, A) {} 5207 5208 /// See AbstractAttribute::initialize(...). 5209 void initialize(Attributor &A) override { 5210 if (getAssociatedValue().getType()->isVoidTy()) 5211 indicatePessimisticFixpoint(); 5212 if (A.hasSimplificationCallback(getIRPosition())) 5213 indicatePessimisticFixpoint(); 5214 } 5215 5216 /// See AbstractAttribute::getAsStr(). 5217 const std::string getAsStr() const override { 5218 LLVM_DEBUG({ 5219 errs() << "SAV: " << (bool)SimplifiedAssociatedValue << " "; 5220 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue) 5221 errs() << "SAV: " << **SimplifiedAssociatedValue << " "; 5222 }); 5223 return isValidState() ? (isAtFixpoint() ? 
"simplified" : "maybe-simple") 5224 : "not-simple"; 5225 } 5226 5227 /// See AbstractAttribute::trackStatistics() 5228 void trackStatistics() const override {} 5229 5230 /// See AAValueSimplify::getAssumedSimplifiedValue() 5231 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 5232 return SimplifiedAssociatedValue; 5233 } 5234 5235 /// Ensure the return value is \p V with type \p Ty, if not possible return 5236 /// nullptr. If \p Check is true we will only verify such an operation would 5237 /// suceed and return a non-nullptr value if that is the case. No IR is 5238 /// generated or modified. 5239 static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI, 5240 bool Check) { 5241 if (auto *TypedV = AA::getWithType(V, Ty)) 5242 return TypedV; 5243 if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty)) 5244 return Check ? &V 5245 : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty, 5246 "", CtxI); 5247 return nullptr; 5248 } 5249 5250 /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble. 5251 /// If \p Check is true we will only verify such an operation would suceed and 5252 /// return a non-nullptr value if that is the case. No IR is generated or 5253 /// modified. 5254 static Value *reproduceInst(Attributor &A, 5255 const AbstractAttribute &QueryingAA, 5256 Instruction &I, Type &Ty, Instruction *CtxI, 5257 bool Check, ValueToValueMapTy &VMap) { 5258 assert(CtxI && "Cannot reproduce an instruction without context!"); 5259 if (Check && (I.mayReadFromMemory() || 5260 !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr, 5261 /* TLI */ nullptr))) 5262 return nullptr; 5263 for (Value *Op : I.operands()) { 5264 Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap); 5265 if (!NewOp) { 5266 assert(Check && "Manifest of new value unexpectedly failed!"); 5267 return nullptr; 5268 } 5269 if (!Check) 5270 VMap[Op] = NewOp; 5271 } 5272 if (Check) 5273 return &I; 5274 5275 Instruction *CloneI = I.clone(); 5276 VMap[&I] = CloneI; 5277 CloneI->insertBefore(CtxI); 5278 RemapInstruction(CloneI, VMap); 5279 return CloneI; 5280 } 5281 5282 /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble. 5283 /// If \p Check is true we will only verify such an operation would suceed and 5284 /// return a non-nullptr value if that is the case. No IR is generated or 5285 /// modified. 5286 static Value *reproduceValue(Attributor &A, 5287 const AbstractAttribute &QueryingAA, Value &V, 5288 Type &Ty, Instruction *CtxI, bool Check, 5289 ValueToValueMapTy &VMap) { 5290 if (const auto &NewV = VMap.lookup(&V)) 5291 return NewV; 5292 bool UsedAssumedInformation = false; 5293 Optional<Value *> SimpleV = 5294 A.getAssumedSimplified(V, QueryingAA, UsedAssumedInformation); 5295 if (!SimpleV.hasValue()) 5296 return PoisonValue::get(&Ty); 5297 Value *EffectiveV = &V; 5298 if (SimpleV.getValue()) 5299 EffectiveV = SimpleV.getValue(); 5300 if (auto *C = dyn_cast<Constant>(EffectiveV)) 5301 if (!C->canTrap()) 5302 return C; 5303 if (CtxI && AA::isValidAtPosition(*EffectiveV, *CtxI, A.getInfoCache())) 5304 return ensureType(A, *EffectiveV, Ty, CtxI, Check); 5305 if (auto *I = dyn_cast<Instruction>(EffectiveV)) 5306 if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap)) 5307 return ensureType(A, *NewV, Ty, CtxI, Check); 5308 return nullptr; 5309 } 5310 5311 /// Return a value we can use as replacement for the associated one, or 5312 /// nullptr if we don't have one that makes sense. 
  Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
    Value *NewV = SimplifiedAssociatedValue.hasValue()
                      ? SimplifiedAssociatedValue.getValue()
                      : UndefValue::get(getAssociatedType());
    if (NewV && NewV != &getAssociatedValue()) {
      ValueToValueMapTy VMap;
      // First verify we can reproduce the value with the required type at the
      // context location before we actually start modifying the IR.
      if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
                         /* CheckOnly */ true, VMap))
        return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
                              /* CheckOnly */ false, VMap);
    }
    return nullptr;
  }

  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified =
          A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Returns true if a candidate was found, false otherwise.
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);

    if (!COpt.hasValue()) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = COpt.getValue()) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialValues>(A))
      return true;
    return false;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &U : getAssociatedValue().uses()) {
      // Check if we need to adjust the insertion point to make sure the IR is
      // valid.
      Instruction *IP = dyn_cast<Instruction>(U.getUser());
      if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
        IP = PHI->getIncomingBlock(U)->getTerminator();
      if (auto *NewV = manifestReplacementValue(A, IP)) {
        LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
                          << " -> " << *NewV << " :: " << *this << "\n");
        if (A.changeUseAfterManifest(U, *NewV))
          Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
5394 ChangeStatus indicatePessimisticFixpoint() override { 5395 SimplifiedAssociatedValue = &getAssociatedValue(); 5396 return AAValueSimplify::indicatePessimisticFixpoint(); 5397 } 5398 5399 static bool handleLoad(Attributor &A, const AbstractAttribute &AA, 5400 LoadInst &L, function_ref<bool(Value &)> Union) { 5401 auto UnionWrapper = [&](Value &V, Value &Obj) { 5402 if (isa<AllocaInst>(Obj)) 5403 return Union(V); 5404 if (!AA::isDynamicallyUnique(A, AA, V)) 5405 return false; 5406 ValueToValueMapTy VMap; 5407 if (!reproduceValue(A, AA, V, *L.getType(), &L, /* CheckOnly */ true, 5408 VMap)) 5409 return false; 5410 return Union(V); 5411 }; 5412 5413 Value &Ptr = *L.getPointerOperand(); 5414 SmallVector<Value *, 8> Objects; 5415 bool UsedAssumedInformation = false; 5416 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L, 5417 UsedAssumedInformation)) 5418 return false; 5419 5420 const auto *TLI = 5421 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction()); 5422 for (Value *Obj : Objects) { 5423 LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n"); 5424 if (isa<UndefValue>(Obj)) 5425 continue; 5426 if (isa<ConstantPointerNull>(Obj)) { 5427 // A null pointer access can be undefined but any offset from null may 5428 // be OK. We do not try to optimize the latter. 5429 if (!NullPointerIsDefined(L.getFunction(), 5430 Ptr.getType()->getPointerAddressSpace()) && 5431 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj) 5432 continue; 5433 return false; 5434 } 5435 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI); 5436 if (!InitialVal || !Union(*InitialVal)) 5437 return false; 5438 5439 LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store " 5440 "propagation, checking accesses next.\n"); 5441 5442 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) { 5443 LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n"); 5444 if (Acc.isWrittenValueYetUndetermined()) 5445 return true; 5446 Value *Content = Acc.getWrittenValue(); 5447 if (!Content) 5448 return false; 5449 Value *CastedContent = 5450 AA::getWithType(*Content, *AA.getAssociatedType()); 5451 if (!CastedContent) 5452 return false; 5453 if (IsExact) 5454 return UnionWrapper(*CastedContent, *Obj); 5455 if (auto *C = dyn_cast<Constant>(CastedContent)) 5456 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C)) 5457 return UnionWrapper(*CastedContent, *Obj); 5458 return false; 5459 }; 5460 5461 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj), 5462 DepClassTy::REQUIRED); 5463 if (!PI.forallInterferingAccesses(A, AA, L, CheckAccess)) 5464 return false; 5465 } 5466 return true; 5467 } 5468 }; 5469 5470 struct AAValueSimplifyArgument final : AAValueSimplifyImpl { 5471 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) 5472 : AAValueSimplifyImpl(IRP, A) {} 5473 5474 void initialize(Attributor &A) override { 5475 AAValueSimplifyImpl::initialize(A); 5476 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) 5477 indicatePessimisticFixpoint(); 5478 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated, 5479 Attribute::StructRet, Attribute::Nest, Attribute::ByVal}, 5480 /* IgnoreSubsumingPositions */ true)) 5481 indicatePessimisticFixpoint(); 5482 } 5483 5484 /// See AbstractAttribute::updateImpl(...). 5485 ChangeStatus updateImpl(Attributor &A) override { 5486 // Byval is only replacable if it is readonly otherwise we would write into 5487 // the replaced value and not the copy that byval creates implicitly. 
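    // Illustration (the IR below is hypothetical, not from this file): for
    //   call void @fn(ptr byval(i32) %p)
    // the callee receives a pointer to a private copy of *%p. Replacing the
    // byval argument with the call-site operand is only sound if the callee
    // never writes through the argument; otherwise the write would hit the
    // caller's object instead of the implicit byval copy.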
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue, e.g.,
      // there is no race by not copying a constant byval.
      bool IsKnown;
      if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
        return indicatePessimisticFixpoint();
    }

    auto Before = SimplifiedAssociatedValue;

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified values
      // in other functions, e.g., we don't want to say an argument in a
      // static function is actually an argument in a different function.
      bool UsedAssumedInformation = false;
      Optional<Constant *> SimpleArgOp =
          A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
      if (!SimpleArgOp.hasValue())
        return true;
      if (!SimpleArgOp.getValue())
        return false;
      if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
        return false;
      return unionAssumed(*SimpleArgOp);
    };

    // Generate an answer specific to a call site context.
    bool Success;
    bool UsedAssumedInformation = false;
    if (hasCallBaseContext() &&
        getCallBaseContext()->getCalledFunction() == Arg->getParent())
      Success = PredForCallSite(
          AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
    else
      Success = A.checkForAllCallSites(PredForCallSite, *this, true,
                                       UsedAssumedInformation);

    if (!Success)
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
};

struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!isValidState())
      return nullptr;
    return SimplifiedAssociatedValue;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto ReturnInstCB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      return checkAndUpdate(
          A, *this,
          IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                   UsedAssumedInformation))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ?
ChangeStatus::UNCHANGED 5579 : ChangeStatus ::CHANGED; 5580 } 5581 5582 ChangeStatus manifest(Attributor &A) override { 5583 // We queried AAValueSimplify for the returned values so they will be 5584 // replaced if a simplified form was found. Nothing to do here. 5585 return ChangeStatus::UNCHANGED; 5586 } 5587 5588 /// See AbstractAttribute::trackStatistics() 5589 void trackStatistics() const override { 5590 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 5591 } 5592 }; 5593 5594 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 5595 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 5596 : AAValueSimplifyImpl(IRP, A) {} 5597 5598 /// See AbstractAttribute::initialize(...). 5599 void initialize(Attributor &A) override { 5600 AAValueSimplifyImpl::initialize(A); 5601 Value &V = getAnchorValue(); 5602 5603 // TODO: add other stuffs 5604 if (isa<Constant>(V)) 5605 indicatePessimisticFixpoint(); 5606 } 5607 5608 /// Check if \p Cmp is a comparison we can simplify. 5609 /// 5610 /// We handle multiple cases, one in which at least one operand is an 5611 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other 5612 /// operand. Return true if successful, in that case SimplifiedAssociatedValue 5613 /// will be updated. 5614 bool handleCmp(Attributor &A, CmpInst &Cmp) { 5615 auto Union = [&](Value &V) { 5616 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5617 SimplifiedAssociatedValue, &V, V.getType()); 5618 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5619 }; 5620 5621 Value *LHS = Cmp.getOperand(0); 5622 Value *RHS = Cmp.getOperand(1); 5623 5624 // Simplify the operands first. 5625 bool UsedAssumedInformation = false; 5626 const auto &SimplifiedLHS = 5627 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 5628 *this, UsedAssumedInformation); 5629 if (!SimplifiedLHS.hasValue()) 5630 return true; 5631 if (!SimplifiedLHS.getValue()) 5632 return false; 5633 LHS = *SimplifiedLHS; 5634 5635 const auto &SimplifiedRHS = 5636 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 5637 *this, UsedAssumedInformation); 5638 if (!SimplifiedRHS.hasValue()) 5639 return true; 5640 if (!SimplifiedRHS.getValue()) 5641 return false; 5642 RHS = *SimplifiedRHS; 5643 5644 LLVMContext &Ctx = Cmp.getContext(); 5645 // Handle the trivial case first in which we don't even need to think about 5646 // null or non-null. 5647 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) { 5648 Constant *NewVal = 5649 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual()); 5650 if (!Union(*NewVal)) 5651 return false; 5652 if (!UsedAssumedInformation) 5653 indicateOptimisticFixpoint(); 5654 return true; 5655 } 5656 5657 // From now on we only handle equalities (==, !=). 5658 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp); 5659 if (!ICmp || !ICmp->isEquality()) 5660 return false; 5661 5662 bool LHSIsNull = isa<ConstantPointerNull>(LHS); 5663 bool RHSIsNull = isa<ConstantPointerNull>(RHS); 5664 if (!LHSIsNull && !RHSIsNull) 5665 return false; 5666 5667 // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the 5668 // non-nullptr operand and if we assume it's non-null we can conclude the 5669 // result of the comparison. 5670 assert((LHSIsNull || RHSIsNull) && 5671 "Expected nullptr versus non-nullptr comparison at this point"); 5672 5673 // The index is the operand that we assume is not null. 
5674 unsigned PtrIdx = LHSIsNull; 5675 auto &PtrNonNullAA = A.getAAFor<AANonNull>( 5676 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)), 5677 DepClassTy::REQUIRED); 5678 if (!PtrNonNullAA.isAssumedNonNull()) 5679 return false; 5680 UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull(); 5681 5682 // The new value depends on the predicate, true for != and false for ==. 5683 Constant *NewVal = ConstantInt::get( 5684 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE); 5685 if (!Union(*NewVal)) 5686 return false; 5687 5688 if (!UsedAssumedInformation) 5689 indicateOptimisticFixpoint(); 5690 5691 return true; 5692 } 5693 5694 bool updateWithLoad(Attributor &A, LoadInst &L) { 5695 auto Union = [&](Value &V) { 5696 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5697 SimplifiedAssociatedValue, &V, L.getType()); 5698 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5699 }; 5700 return handleLoad(A, *this, L, Union); 5701 } 5702 5703 /// Use the generic, non-optimistic InstSimplfy functionality if we managed to 5704 /// simplify any operand of the instruction \p I. Return true if successful, 5705 /// in that case SimplifiedAssociatedValue will be updated. 5706 bool handleGenericInst(Attributor &A, Instruction &I) { 5707 bool SomeSimplified = false; 5708 bool UsedAssumedInformation = false; 5709 5710 SmallVector<Value *, 8> NewOps(I.getNumOperands()); 5711 int Idx = 0; 5712 for (Value *Op : I.operands()) { 5713 const auto &SimplifiedOp = 5714 A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()), 5715 *this, UsedAssumedInformation); 5716 // If we are not sure about any operand we are not sure about the entire 5717 // instruction, we'll wait. 5718 if (!SimplifiedOp.hasValue()) 5719 return true; 5720 5721 if (SimplifiedOp.getValue()) 5722 NewOps[Idx] = SimplifiedOp.getValue(); 5723 else 5724 NewOps[Idx] = Op; 5725 5726 SomeSimplified |= (NewOps[Idx] != Op); 5727 ++Idx; 5728 } 5729 5730 // We won't bother with the InstSimplify interface if we didn't simplify any 5731 // operand ourselves. 5732 if (!SomeSimplified) 5733 return false; 5734 5735 InformationCache &InfoCache = A.getInfoCache(); 5736 Function *F = I.getFunction(); 5737 const auto *DT = 5738 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 5739 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5740 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 5741 OptimizationRemarkEmitter *ORE = nullptr; 5742 5743 const DataLayout &DL = I.getModule()->getDataLayout(); 5744 SimplifyQuery Q(DL, TLI, DT, AC, &I); 5745 if (Value *SimplifiedI = 5746 SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) { 5747 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5748 SimplifiedAssociatedValue, SimplifiedI, I.getType()); 5749 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5750 } 5751 return false; 5752 } 5753 5754 /// See AbstractAttribute::updateImpl(...). 
5755 ChangeStatus updateImpl(Attributor &A) override { 5756 auto Before = SimplifiedAssociatedValue; 5757 5758 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, 5759 bool Stripped) -> bool { 5760 auto &AA = A.getAAFor<AAValueSimplify>( 5761 *this, IRPosition::value(V, getCallBaseContext()), 5762 DepClassTy::REQUIRED); 5763 if (!Stripped && this == &AA) { 5764 5765 if (auto *I = dyn_cast<Instruction>(&V)) { 5766 if (auto *LI = dyn_cast<LoadInst>(&V)) 5767 if (updateWithLoad(A, *LI)) 5768 return true; 5769 if (auto *Cmp = dyn_cast<CmpInst>(&V)) 5770 if (handleCmp(A, *Cmp)) 5771 return true; 5772 if (handleGenericInst(A, *I)) 5773 return true; 5774 } 5775 // TODO: Look the instruction and check recursively. 5776 5777 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V 5778 << "\n"); 5779 return false; 5780 } 5781 return checkAndUpdate(A, *this, 5782 IRPosition::value(V, getCallBaseContext())); 5783 }; 5784 5785 bool Dummy = false; 5786 bool UsedAssumedInformation = false; 5787 if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy, 5788 VisitValueCB, getCtxI(), 5789 UsedAssumedInformation, 5790 /* UseValueSimplify */ false)) 5791 if (!askSimplifiedValueForOtherAAs(A)) 5792 return indicatePessimisticFixpoint(); 5793 5794 // If a candicate was found in this update, return CHANGED. 5795 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED 5796 : ChangeStatus ::CHANGED; 5797 } 5798 5799 /// See AbstractAttribute::trackStatistics() 5800 void trackStatistics() const override { 5801 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 5802 } 5803 }; 5804 5805 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 5806 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 5807 : AAValueSimplifyImpl(IRP, A) {} 5808 5809 /// See AbstractAttribute::initialize(...). 5810 void initialize(Attributor &A) override { 5811 SimplifiedAssociatedValue = nullptr; 5812 indicateOptimisticFixpoint(); 5813 } 5814 /// See AbstractAttribute::initialize(...). 5815 ChangeStatus updateImpl(Attributor &A) override { 5816 llvm_unreachable( 5817 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 5818 } 5819 /// See AbstractAttribute::trackStatistics() 5820 void trackStatistics() const override { 5821 STATS_DECLTRACK_FN_ATTR(value_simplify) 5822 } 5823 }; 5824 5825 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 5826 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 5827 : AAValueSimplifyFunction(IRP, A) {} 5828 /// See AbstractAttribute::trackStatistics() 5829 void trackStatistics() const override { 5830 STATS_DECLTRACK_CS_ATTR(value_simplify) 5831 } 5832 }; 5833 5834 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl { 5835 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 5836 : AAValueSimplifyImpl(IRP, A) {} 5837 5838 void initialize(Attributor &A) override { 5839 AAValueSimplifyImpl::initialize(A); 5840 Function *Fn = getAssociatedFunction(); 5841 if (!Fn) { 5842 indicatePessimisticFixpoint(); 5843 return; 5844 } 5845 for (Argument &Arg : Fn->args()) { 5846 if (Arg.hasReturnedAttr()) { 5847 auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()), 5848 Arg.getArgNo()); 5849 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT && 5850 checkAndUpdate(A, *this, IRP)) 5851 indicateOptimisticFixpoint(); 5852 else 5853 indicatePessimisticFixpoint(); 5854 return; 5855 } 5856 } 5857 } 5858 5859 /// See AbstractAttribute::updateImpl(...). 
5860 ChangeStatus updateImpl(Attributor &A) override { 5861 auto Before = SimplifiedAssociatedValue; 5862 auto &RetAA = A.getAAFor<AAReturnedValues>( 5863 *this, IRPosition::function(*getAssociatedFunction()), 5864 DepClassTy::REQUIRED); 5865 auto PredForReturned = 5866 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 5867 bool UsedAssumedInformation = false; 5868 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent( 5869 &RetVal, *cast<CallBase>(getCtxI()), *this, 5870 UsedAssumedInformation); 5871 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5872 SimplifiedAssociatedValue, CSRetVal, getAssociatedType()); 5873 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5874 }; 5875 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned)) 5876 if (!askSimplifiedValueForOtherAAs(A)) 5877 return indicatePessimisticFixpoint(); 5878 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED 5879 : ChangeStatus ::CHANGED; 5880 } 5881 5882 void trackStatistics() const override { 5883 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 5884 } 5885 }; 5886 5887 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 5888 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 5889 : AAValueSimplifyFloating(IRP, A) {} 5890 5891 /// See AbstractAttribute::manifest(...). 5892 ChangeStatus manifest(Attributor &A) override { 5893 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5894 5895 if (auto *NewV = manifestReplacementValue(A, getCtxI())) { 5896 Use &U = cast<CallBase>(&getAnchorValue()) 5897 ->getArgOperandUse(getCallSiteArgNo()); 5898 if (A.changeUseAfterManifest(U, *NewV)) 5899 Changed = ChangeStatus::CHANGED; 5900 } 5901 5902 return Changed | AAValueSimplify::manifest(A); 5903 } 5904 5905 void trackStatistics() const override { 5906 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 5907 } 5908 }; 5909 } // namespace 5910 5911 /// ----------------------- Heap-To-Stack Conversion --------------------------- 5912 namespace { 5913 struct AAHeapToStackFunction final : public AAHeapToStack { 5914 5915 struct AllocationInfo { 5916 /// The call that allocates the memory. 5917 CallBase *const CB; 5918 5919 /// The library function id for the allocation. 5920 LibFunc LibraryFunctionId = NotLibFunc; 5921 5922 /// The status wrt. a rewrite. 5923 enum { 5924 STACK_DUE_TO_USE, 5925 STACK_DUE_TO_FREE, 5926 INVALID, 5927 } Status = STACK_DUE_TO_USE; 5928 5929 /// Flag to indicate if we encountered a use that might free this allocation 5930 /// but which is not in the deallocation infos. 5931 bool HasPotentiallyFreeingUnknownUses = false; 5932 5933 /// The set of free calls that use this allocation. 5934 SmallSetVector<CallBase *, 1> PotentialFreeCalls{}; 5935 }; 5936 5937 struct DeallocationInfo { 5938 /// The call that deallocates the memory. 5939 CallBase *const CB; 5940 5941 /// Flag to indicate if we don't know all objects this deallocation might 5942 /// free. 5943 bool MightFreeUnknownObjects = false; 5944 5945 /// The set of allocation calls that are potentially freed. 5946 SmallSetVector<CallBase *, 1> PotentialAllocationCalls{}; 5947 }; 5948 5949 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5950 : AAHeapToStack(IRP, A) {} 5951 5952 ~AAHeapToStackFunction() { 5953 // Ensure we call the destructor so we release any memory allocated in the 5954 // sets. 
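    // Note (explanatory comment, not from the original source): the info
    // objects are created with placement new into the Attributor's bump
    // allocator, so `delete` is never called on them. Running the destructors
    // explicitly releases the heap storage owned by the SmallSetVector
    // members.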
5955 for (auto &It : AllocationInfos) 5956 It.second->~AllocationInfo(); 5957 for (auto &It : DeallocationInfos) 5958 It.second->~DeallocationInfo(); 5959 } 5960 5961 void initialize(Attributor &A) override { 5962 AAHeapToStack::initialize(A); 5963 5964 const Function *F = getAnchorScope(); 5965 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5966 5967 auto AllocationIdentifierCB = [&](Instruction &I) { 5968 CallBase *CB = dyn_cast<CallBase>(&I); 5969 if (!CB) 5970 return true; 5971 if (isFreeCall(CB, TLI)) { 5972 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 5973 return true; 5974 } 5975 // To do heap to stack, we need to know that the allocation itself is 5976 // removable once uses are rewritten, and that we can initialize the 5977 // alloca to the same pattern as the original allocation result. 5978 if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) { 5979 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext()); 5980 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) { 5981 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB}; 5982 AllocationInfos[CB] = AI; 5983 if (TLI) 5984 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 5985 } 5986 } 5987 return true; 5988 }; 5989 5990 bool UsedAssumedInformation = false; 5991 bool Success = A.checkForAllCallLikeInstructions( 5992 AllocationIdentifierCB, *this, UsedAssumedInformation, 5993 /* CheckBBLivenessOnly */ false, 5994 /* CheckPotentiallyDead */ true); 5995 (void)Success; 5996 assert(Success && "Did not expect the call base visit callback to fail!"); 5997 5998 Attributor::SimplifictionCallbackTy SCB = 5999 [](const IRPosition &, const AbstractAttribute *, 6000 bool &) -> Optional<Value *> { return nullptr; }; 6001 for (const auto &It : AllocationInfos) 6002 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 6003 SCB); 6004 for (const auto &It : DeallocationInfos) 6005 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 6006 SCB); 6007 } 6008 6009 const std::string getAsStr() const override { 6010 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 6011 for (const auto &It : AllocationInfos) { 6012 if (It.second->Status == AllocationInfo::INVALID) 6013 ++NumInvalidMallocs; 6014 else 6015 ++NumH2SMallocs; 6016 } 6017 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 6018 std::to_string(NumInvalidMallocs); 6019 } 6020 6021 /// See AbstractAttribute::trackStatistics(). 
6022 void trackStatistics() const override { 6023 STATS_DECL( 6024 MallocCalls, Function, 6025 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 6026 for (auto &It : AllocationInfos) 6027 if (It.second->Status != AllocationInfo::INVALID) 6028 ++BUILD_STAT_NAME(MallocCalls, Function); 6029 } 6030 6031 bool isAssumedHeapToStack(const CallBase &CB) const override { 6032 if (isValidState()) 6033 if (AllocationInfo *AI = 6034 AllocationInfos.lookup(const_cast<CallBase *>(&CB))) 6035 return AI->Status != AllocationInfo::INVALID; 6036 return false; 6037 } 6038 6039 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 6040 if (!isValidState()) 6041 return false; 6042 6043 for (auto &It : AllocationInfos) { 6044 AllocationInfo &AI = *It.second; 6045 if (AI.Status == AllocationInfo::INVALID) 6046 continue; 6047 6048 if (AI.PotentialFreeCalls.count(&CB)) 6049 return true; 6050 } 6051 6052 return false; 6053 } 6054 6055 ChangeStatus manifest(Attributor &A) override { 6056 assert(getState().isValidState() && 6057 "Attempted to manifest an invalid state!"); 6058 6059 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 6060 Function *F = getAnchorScope(); 6061 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6062 6063 for (auto &It : AllocationInfos) { 6064 AllocationInfo &AI = *It.second; 6065 if (AI.Status == AllocationInfo::INVALID) 6066 continue; 6067 6068 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 6069 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 6070 A.deleteAfterManifest(*FreeCall); 6071 HasChanged = ChangeStatus::CHANGED; 6072 } 6073 6074 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 6075 << "\n"); 6076 6077 auto Remark = [&](OptimizationRemark OR) { 6078 LibFunc IsAllocShared; 6079 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 6080 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 6081 return OR << "Moving globalized variable to the stack."; 6082 return OR << "Moving memory allocation from the heap to the stack."; 6083 }; 6084 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6085 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 6086 else 6087 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 6088 6089 const DataLayout &DL = A.getInfoCache().getDL(); 6090 Value *Size; 6091 Optional<APInt> SizeAPI = getSize(A, *this, AI); 6092 if (SizeAPI.hasValue()) { 6093 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 6094 } else { 6095 LLVMContext &Ctx = AI.CB->getContext(); 6096 ObjectSizeOpts Opts; 6097 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); 6098 SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB); 6099 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && 6100 cast<ConstantInt>(SizeOffsetPair.second)->isZero()); 6101 Size = SizeOffsetPair.first; 6102 } 6103 6104 Align Alignment(1); 6105 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 6106 Alignment = max(Alignment, RetAlign); 6107 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6108 Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); 6109 assert(AlignmentAPI.hasValue() && 6110 "Expected an alignment during manifest!"); 6111 Alignment = 6112 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue())); 6113 } 6114 6115 // TODO: Hoist the alloca towards the function entry. 
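      // Sketch of the rewrite performed below (hypothetical IR, not from this
      // file):
      //   %p = call ptr @malloc(i64 32)
      // becomes
      //   %p = alloca i8, i64 32, align <deduced>
      // followed by a cast to the original return type if needed; for
      // zero-initializing allocators such as calloc a memset of the initial
      // value is emitted after the alloca.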
6116 unsigned AS = DL.getAllocaAddrSpace(); 6117 Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS, 6118 Size, Alignment, "", AI.CB); 6119 6120 if (Alloca->getType() != AI.CB->getType()) 6121 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6122 Alloca, AI.CB->getType(), "malloc_cast", AI.CB); 6123 6124 auto *I8Ty = Type::getInt8Ty(F->getContext()); 6125 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty); 6126 assert(InitVal && 6127 "Must be able to materialize initial memory state of allocation"); 6128 6129 A.changeValueAfterManifest(*AI.CB, *Alloca); 6130 6131 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 6132 auto *NBB = II->getNormalDest(); 6133 BranchInst::Create(NBB, AI.CB->getParent()); 6134 A.deleteAfterManifest(*AI.CB); 6135 } else { 6136 A.deleteAfterManifest(*AI.CB); 6137 } 6138 6139 // Initialize the alloca with the same value as used by the allocation 6140 // function. We can skip undef as the initial value of an alloc is 6141 // undef, and the memset would simply end up being DSEd. 6142 if (!isa<UndefValue>(InitVal)) { 6143 IRBuilder<> Builder(Alloca->getNextNode()); 6144 // TODO: Use alignment above if align!=1 6145 Builder.CreateMemSet(Alloca, InitVal, Size, None); 6146 } 6147 HasChanged = ChangeStatus::CHANGED; 6148 } 6149 6150 return HasChanged; 6151 } 6152 6153 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6154 Value &V) { 6155 bool UsedAssumedInformation = false; 6156 Optional<Constant *> SimpleV = 6157 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6158 if (!SimpleV.hasValue()) 6159 return APInt(64, 0); 6160 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) 6161 return CI->getValue(); 6162 return llvm::None; 6163 } 6164 6165 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6166 AllocationInfo &AI) { 6167 auto Mapper = [&](const Value *V) -> const Value * { 6168 bool UsedAssumedInformation = false; 6169 if (Optional<Constant *> SimpleV = 6170 A.getAssumedConstant(*V, AA, UsedAssumedInformation)) 6171 if (*SimpleV) 6172 return *SimpleV; 6173 return V; 6174 }; 6175 6176 const Function *F = getAnchorScope(); 6177 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6178 return getAllocSize(AI.CB, TLI, Mapper); 6179 } 6180 6181 /// Collection of all malloc-like calls in a function with associated 6182 /// information. 6183 MapVector<CallBase *, AllocationInfo *> AllocationInfos; 6184 6185 /// Collection of all free-like calls in a function with associated 6186 /// information. 6187 MapVector<CallBase *, DeallocationInfo *> DeallocationInfos; 6188 6189 ChangeStatus updateImpl(Attributor &A) override; 6190 }; 6191 6192 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { 6193 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6194 const Function *F = getAnchorScope(); 6195 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6196 6197 const auto &LivenessAA = 6198 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); 6199 6200 MustBeExecutedContextExplorer &Explorer = 6201 A.getInfoCache().getMustBeExecutedContextExplorer(); 6202 6203 bool StackIsAccessibleByOtherThreads = 6204 A.getInfoCache().stackIsAccessibleByOtherThreads(); 6205 6206 // Flag to ensure we update our deallocation information at most once per 6207 // updateImpl call and only if we use the free check reasoning. 
6208 bool HasUpdatedFrees = false; 6209 6210 auto UpdateFrees = [&]() { 6211 HasUpdatedFrees = true; 6212 6213 for (auto &It : DeallocationInfos) { 6214 DeallocationInfo &DI = *It.second; 6215 // For now we cannot use deallocations that have unknown inputs, skip 6216 // them. 6217 if (DI.MightFreeUnknownObjects) 6218 continue; 6219 6220 // No need to analyze dead calls, ignore them instead. 6221 bool UsedAssumedInformation = false; 6222 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation, 6223 /* CheckBBLivenessOnly */ true)) 6224 continue; 6225 6226 // Use the optimistic version to get the freed objects, ignoring dead 6227 // branches etc. 6228 SmallVector<Value *, 8> Objects; 6229 if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects, 6230 *this, DI.CB, 6231 UsedAssumedInformation)) { 6232 LLVM_DEBUG( 6233 dbgs() 6234 << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n"); 6235 DI.MightFreeUnknownObjects = true; 6236 continue; 6237 } 6238 6239 // Check each object explicitly. 6240 for (auto *Obj : Objects) { 6241 // Free of null and undef can be ignored as no-ops (or UB in the latter 6242 // case). 6243 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj)) 6244 continue; 6245 6246 CallBase *ObjCB = dyn_cast<CallBase>(Obj); 6247 if (!ObjCB) { 6248 LLVM_DEBUG(dbgs() 6249 << "[H2S] Free of a non-call object: " << *Obj << "\n"); 6250 DI.MightFreeUnknownObjects = true; 6251 continue; 6252 } 6253 6254 AllocationInfo *AI = AllocationInfos.lookup(ObjCB); 6255 if (!AI) { 6256 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj 6257 << "\n"); 6258 DI.MightFreeUnknownObjects = true; 6259 continue; 6260 } 6261 6262 DI.PotentialAllocationCalls.insert(ObjCB); 6263 } 6264 } 6265 }; 6266 6267 auto FreeCheck = [&](AllocationInfo &AI) { 6268 // If the stack is not accessible by other threads, the "must-free" logic 6269 // doesn't apply as the pointer could be shared and needs to be places in 6270 // "shareable" memory. 6271 if (!StackIsAccessibleByOtherThreads) { 6272 auto &NoSyncAA = 6273 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL); 6274 if (!NoSyncAA.isAssumedNoSync()) { 6275 LLVM_DEBUG( 6276 dbgs() << "[H2S] found an escaping use, stack is not accessible by " 6277 "other threads and function is not nosync:\n"); 6278 return false; 6279 } 6280 } 6281 if (!HasUpdatedFrees) 6282 UpdateFrees(); 6283 6284 // TODO: Allow multi exit functions that have different free calls. 
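    // Summary (explanatory comment, not from the original source) of the
    // checks below: the allocation is only moved to the stack if there is
    // exactly one known free call, that free is known to free only this
    // allocation, and it must be executed whenever the allocation is reached,
    // so the object's lifetime provably ends within the function.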
6285 if (AI.PotentialFreeCalls.size() != 1) { 6286 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " 6287 << AI.PotentialFreeCalls.size() << "\n"); 6288 return false; 6289 } 6290 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); 6291 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); 6292 if (!DI) { 6293 LLVM_DEBUG( 6294 dbgs() << "[H2S] unique free call was not known as deallocation call " 6295 << *UniqueFree << "\n"); 6296 return false; 6297 } 6298 if (DI->MightFreeUnknownObjects) { 6299 LLVM_DEBUG( 6300 dbgs() << "[H2S] unique free call might free unknown allocations\n"); 6301 return false; 6302 } 6303 if (DI->PotentialAllocationCalls.size() > 1) { 6304 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " 6305 << DI->PotentialAllocationCalls.size() 6306 << " different allocations\n"); 6307 return false; 6308 } 6309 if (*DI->PotentialAllocationCalls.begin() != AI.CB) { 6310 LLVM_DEBUG( 6311 dbgs() 6312 << "[H2S] unique free call not known to free this allocation but " 6313 << **DI->PotentialAllocationCalls.begin() << "\n"); 6314 return false; 6315 } 6316 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); 6317 if (!Explorer.findInContextOf(UniqueFree, CtxI)) { 6318 LLVM_DEBUG( 6319 dbgs() 6320 << "[H2S] unique free call might not be executed with the allocation " 6321 << *UniqueFree << "\n"); 6322 return false; 6323 } 6324 return true; 6325 }; 6326 6327 auto UsesCheck = [&](AllocationInfo &AI) { 6328 bool ValidUsesOnly = true; 6329 6330 auto Pred = [&](const Use &U, bool &Follow) -> bool { 6331 Instruction *UserI = cast<Instruction>(U.getUser()); 6332 if (isa<LoadInst>(UserI)) 6333 return true; 6334 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 6335 if (SI->getValueOperand() == U.get()) { 6336 LLVM_DEBUG(dbgs() 6337 << "[H2S] escaping store to memory: " << *UserI << "\n"); 6338 ValidUsesOnly = false; 6339 } else { 6340 // A store into the malloc'ed memory is fine. 6341 } 6342 return true; 6343 } 6344 if (auto *CB = dyn_cast<CallBase>(UserI)) { 6345 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 6346 return true; 6347 if (DeallocationInfos.count(CB)) { 6348 AI.PotentialFreeCalls.insert(CB); 6349 return true; 6350 } 6351 6352 unsigned ArgNo = CB->getArgOperandNo(&U); 6353 6354 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 6355 *this, IRPosition::callsite_argument(*CB, ArgNo), 6356 DepClassTy::OPTIONAL); 6357 6358 // If a call site argument use is nofree, we are fine. 6359 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 6360 *this, IRPosition::callsite_argument(*CB, ArgNo), 6361 DepClassTy::OPTIONAL); 6362 6363 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); 6364 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); 6365 if (MaybeCaptured || 6366 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && 6367 MaybeFreed)) { 6368 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; 6369 6370 // Emit a missed remark if this is missed OpenMP globalization. 6371 auto Remark = [&](OptimizationRemarkMissed ORM) { 6372 return ORM 6373 << "Could not move globalized variable to the stack. " 6374 "Variable is potentially captured in call. 
Mark " 6375 "parameter as `__attribute__((noescape))` to override."; 6376 }; 6377 6378 if (ValidUsesOnly && 6379 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6380 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark); 6381 6382 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 6383 ValidUsesOnly = false; 6384 } 6385 return true; 6386 } 6387 6388 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 6389 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 6390 Follow = true; 6391 return true; 6392 } 6393 // Unknown user for which we can not track uses further (in a way that 6394 // makes sense). 6395 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 6396 ValidUsesOnly = false; 6397 return true; 6398 }; 6399 if (!A.checkForAllUses(Pred, *this, *AI.CB)) 6400 return false; 6401 return ValidUsesOnly; 6402 }; 6403 6404 // The actual update starts here. We look at all allocations and depending on 6405 // their status perform the appropriate check(s). 6406 for (auto &It : AllocationInfos) { 6407 AllocationInfo &AI = *It.second; 6408 if (AI.Status == AllocationInfo::INVALID) 6409 continue; 6410 6411 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6412 Optional<APInt> APAlign = getAPInt(A, *this, *Align); 6413 if (!APAlign) { 6414 // Can't generate an alloca which respects the required alignment 6415 // on the allocation. 6416 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB 6417 << "\n"); 6418 AI.Status = AllocationInfo::INVALID; 6419 Changed = ChangeStatus::CHANGED; 6420 continue; 6421 } else { 6422 if (APAlign->ugt(llvm::Value::MaximumAlignment) || 6423 !APAlign->isPowerOf2()) { 6424 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign 6425 << "\n"); 6426 AI.Status = AllocationInfo::INVALID; 6427 Changed = ChangeStatus::CHANGED; 6428 continue; 6429 } 6430 } 6431 } 6432 6433 if (MaxHeapToStackSize != -1) { 6434 Optional<APInt> Size = getSize(A, *this, AI); 6435 if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) { 6436 LLVM_DEBUG({ 6437 if (!Size.hasValue()) 6438 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; 6439 else 6440 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " 6441 << MaxHeapToStackSize << "\n"; 6442 }); 6443 6444 AI.Status = AllocationInfo::INVALID; 6445 Changed = ChangeStatus::CHANGED; 6446 continue; 6447 } 6448 } 6449 6450 switch (AI.Status) { 6451 case AllocationInfo::STACK_DUE_TO_USE: 6452 if (UsesCheck(AI)) 6453 continue; 6454 AI.Status = AllocationInfo::STACK_DUE_TO_FREE; 6455 LLVM_FALLTHROUGH; 6456 case AllocationInfo::STACK_DUE_TO_FREE: 6457 if (FreeCheck(AI)) 6458 continue; 6459 AI.Status = AllocationInfo::INVALID; 6460 Changed = ChangeStatus::CHANGED; 6461 continue; 6462 case AllocationInfo::INVALID: 6463 llvm_unreachable("Invalid allocations should never reach this point!"); 6464 }; 6465 } 6466 6467 return Changed; 6468 } 6469 } // namespace 6470 6471 /// ----------------------- Privatizable Pointers ------------------------------ 6472 namespace { 6473 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 6474 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 6475 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 6476 6477 ChangeStatus indicatePessimisticFixpoint() override { 6478 AAPrivatizablePtr::indicatePessimisticFixpoint(); 6479 PrivatizableType = nullptr; 6480 return ChangeStatus::CHANGED; 6481 } 6482 6483 /// Identify the type we can chose for a private copy of the underlying 6484 /// argument. 
None means it is not clear yet, nullptr means there is none. 6485 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 6486 6487 /// Return a privatizable type that encloses both T0 and T1. 6488 /// TODO: This is merely a stub for now as we should manage a mapping as well. 6489 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) { 6490 if (!T0.hasValue()) 6491 return T1; 6492 if (!T1.hasValue()) 6493 return T0; 6494 if (T0 == T1) 6495 return T0; 6496 return nullptr; 6497 } 6498 6499 Optional<Type *> getPrivatizableType() const override { 6500 return PrivatizableType; 6501 } 6502 6503 const std::string getAsStr() const override { 6504 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]"; 6505 } 6506 6507 protected: 6508 Optional<Type *> PrivatizableType; 6509 }; 6510 6511 // TODO: Do this for call site arguments (probably also other values) as well. 6512 6513 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl { 6514 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A) 6515 : AAPrivatizablePtrImpl(IRP, A) {} 6516 6517 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6518 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6519 // If this is a byval argument and we know all the call sites (so we can 6520 // rewrite them), there is no need to check them explicitly. 6521 bool UsedAssumedInformation = false; 6522 SmallVector<Attribute, 1> Attrs; 6523 getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true); 6524 if (!Attrs.empty() && 6525 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this, 6526 true, UsedAssumedInformation)) 6527 return Attrs[0].getValueAsType(); 6528 6529 Optional<Type *> Ty; 6530 unsigned ArgNo = getIRPosition().getCallSiteArgNo(); 6531 6532 // Make sure the associated call site argument has the same type at all call 6533 // sites and it is an allocation we know is safe to privatize, for now that 6534 // means we only allow alloca instructions. 6535 // TODO: We can additionally analyze the accesses in the callee to create 6536 // the type from that information instead. That is a little more 6537 // involved and will be done in a follow up patch. 6538 auto CallSiteCheck = [&](AbstractCallSite ACS) { 6539 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo); 6540 // Check if a coresponding argument was found or if it is one not 6541 // associated (which can happen for callback calls). 6542 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 6543 return false; 6544 6545 // Check that all call sites agree on a type. 
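// Illustrative example (an assumption, not taken from the source): if one
// call site passes a pointer to an alloca of type %struct.S while another
// passes an i32*, combineTypes() below yields nullptr and privatization is
// given up for this argument.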
6546 auto &PrivCSArgAA = 6547 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED); 6548 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 6549 6550 LLVM_DEBUG({ 6551 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 6552 if (CSTy.hasValue() && CSTy.getValue()) 6553 CSTy.getValue()->print(dbgs()); 6554 else if (CSTy.hasValue()) 6555 dbgs() << "<nullptr>"; 6556 else 6557 dbgs() << "<none>"; 6558 }); 6559 6560 Ty = combineTypes(Ty, CSTy); 6561 6562 LLVM_DEBUG({ 6563 dbgs() << " : New Type: "; 6564 if (Ty.hasValue() && Ty.getValue()) 6565 Ty.getValue()->print(dbgs()); 6566 else if (Ty.hasValue()) 6567 dbgs() << "<nullptr>"; 6568 else 6569 dbgs() << "<none>"; 6570 dbgs() << "\n"; 6571 }); 6572 6573 return !Ty.hasValue() || Ty.getValue(); 6574 }; 6575 6576 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6577 UsedAssumedInformation)) 6578 return nullptr; 6579 return Ty; 6580 } 6581 6582 /// See AbstractAttribute::updateImpl(...). 6583 ChangeStatus updateImpl(Attributor &A) override { 6584 PrivatizableType = identifyPrivatizableType(A); 6585 if (!PrivatizableType.hasValue()) 6586 return ChangeStatus::UNCHANGED; 6587 if (!PrivatizableType.getValue()) 6588 return indicatePessimisticFixpoint(); 6589 6590 // The dependence is optional so we don't give up once we give up on the 6591 // alignment. 6592 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 6593 DepClassTy::OPTIONAL); 6594 6595 // Avoid arguments with padding for now. 6596 if (!getIRPosition().hasAttr(Attribute::ByVal) && 6597 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 6598 A.getInfoCache().getDL())) { 6599 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 6600 return indicatePessimisticFixpoint(); 6601 } 6602 6603 // Collect the types that will replace the privatizable type in the function 6604 // signature. 6605 SmallVector<Type *, 16> ReplacementTypes; 6606 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6607 6608 // Verify callee and caller agree on how the promoted argument would be 6609 // passed. 6610 Function &Fn = *getIRPosition().getAnchorScope(); 6611 const auto *TTI = 6612 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 6613 if (!TTI) { 6614 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function " 6615 << Fn.getName() << "\n"); 6616 return indicatePessimisticFixpoint(); 6617 } 6618 6619 auto CallSiteCheck = [&](AbstractCallSite ACS) { 6620 CallBase *CB = ACS.getInstruction(); 6621 return TTI->areTypesABICompatible( 6622 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes); 6623 }; 6624 bool UsedAssumedInformation = false; 6625 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6626 UsedAssumedInformation)) { 6627 LLVM_DEBUG( 6628 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 6629 << Fn.getName() << "\n"); 6630 return indicatePessimisticFixpoint(); 6631 } 6632 6633 // Register a rewrite of the argument. 6634 Argument *Arg = getAssociatedArgument(); 6635 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 6636 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 6637 return indicatePessimisticFixpoint(); 6638 } 6639 6640 unsigned ArgNo = Arg->getArgNo(); 6641 6642 // Helper to check if for the given call site the associated argument is 6643 // passed to a callback where the privatization would be different. 
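// Illustrative scenario (an assumption, not taken from the source): if the
// call also forwards this argument to a callback callee, the corresponding
// callback argument must be privatizable to the same type; otherwise
// rewriting this function's signature would break the callback, so the
// helper below reports the call site as incompatible.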
6644 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) { 6645 SmallVector<const Use *, 4> CallbackUses; 6646 AbstractCallSite::getCallbackUses(CB, CallbackUses); 6647 for (const Use *U : CallbackUses) { 6648 AbstractCallSite CBACS(U); 6649 assert(CBACS && CBACS.isCallbackCall()); 6650 for (Argument &CBArg : CBACS.getCalledFunction()->args()) { 6651 int CBArgNo = CBACS.getCallArgOperandNo(CBArg); 6652 6653 LLVM_DEBUG({ 6654 dbgs() 6655 << "[AAPrivatizablePtr] Argument " << *Arg 6656 << "check if can be privatized in the context of its parent (" 6657 << Arg->getParent()->getName() 6658 << ")\n[AAPrivatizablePtr] because it is an argument in a " 6659 "callback (" 6660 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 6661 << ")\n[AAPrivatizablePtr] " << CBArg << " : " 6662 << CBACS.getCallArgOperand(CBArg) << " vs " 6663 << CB.getArgOperand(ArgNo) << "\n" 6664 << "[AAPrivatizablePtr] " << CBArg << " : " 6665 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n"; 6666 }); 6667 6668 if (CBArgNo != int(ArgNo)) 6669 continue; 6670 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>( 6671 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED); 6672 if (CBArgPrivAA.isValidState()) { 6673 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType(); 6674 if (!CBArgPrivTy.hasValue()) 6675 continue; 6676 if (CBArgPrivTy.getValue() == PrivatizableType) 6677 continue; 6678 } 6679 6680 LLVM_DEBUG({ 6681 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg 6682 << " cannot be privatized in the context of its parent (" 6683 << Arg->getParent()->getName() 6684 << ")\n[AAPrivatizablePtr] because it is an argument in a " 6685 "callback (" 6686 << CBArgNo << "@" << CBACS.getCalledFunction()->getName() 6687 << ").\n[AAPrivatizablePtr] for which the argument " 6688 "privatization is not compatible.\n"; 6689 }); 6690 return false; 6691 } 6692 } 6693 return true; 6694 }; 6695 6696 // Helper to check if for the given call site the associated argument is 6697 // passed to a direct call where the privatization would be different. 
6698 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6699 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6700 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6701 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6702 "Expected a direct call operand for callback call operand");
6703
6704 LLVM_DEBUG({
6705 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6706 << " check if it can be privatized in the context of its parent ("
6707 << Arg->getParent()->getName()
6708 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6709 "direct call of ("
6710 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6711 << ").\n";
6712 });
6713
6714 Function *DCCallee = DC->getCalledFunction();
6715 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6716 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6717 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6718 DepClassTy::REQUIRED);
6719 if (DCArgPrivAA.isValidState()) {
6720 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6721 if (!DCArgPrivTy.hasValue())
6722 return true;
6723 if (DCArgPrivTy.getValue() == PrivatizableType)
6724 return true;
6725 }
6726 }
6727
6728 LLVM_DEBUG({
6729 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6730 << " cannot be privatized in the context of its parent ("
6731 << Arg->getParent()->getName()
6732 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6733 "direct call of ("
6734 << ACS.getInstruction()->getCalledFunction()->getName()
6735 << ").\n[AAPrivatizablePtr] for which the argument "
6736 "privatization is not compatible.\n";
6737 });
6738 return false;
6739 };
6740
6741 // Helper to check if the associated argument is used at the given abstract
6742 // call site in a way that is incompatible with the privatization assumed
6743 // here.
6744 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6745 if (ACS.isDirectCall())
6746 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6747 if (ACS.isCallbackCall())
6748 return IsCompatiblePrivArgOfDirectCS(ACS);
6749 return false;
6750 };
6751
6752 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6753 UsedAssumedInformation))
6754 return indicatePessimisticFixpoint();
6755
6756 return ChangeStatus::UNCHANGED;
6757 }
6758
6759 /// Given a type to privatize \p PrivType, collect the constituent types
6760 /// (which are used) in \p ReplacementTypes.
6761 static void
6762 identifyReplacementTypes(Type *PrivType,
6763 SmallVectorImpl<Type *> &ReplacementTypes) {
6764 // TODO: For now we expand the privatization type to the fullest which can
6765 // lead to dead arguments that need to be removed later.
6766 assert(PrivType && "Expected privatizable type!");
6767
6768 // Traverse the type, extract constituent types on the outermost level.
6769 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6770 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6771 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6772 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6773 ReplacementTypes.append(PrivArrayType->getNumElements(),
6774 PrivArrayType->getElementType());
6775 } else {
6776 ReplacementTypes.push_back(PrivType);
6777 }
6778 }
6779
6780 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6781 /// The values needed are taken from the arguments of \p F starting at
6782 /// position \p ArgNo.
6783 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6784 unsigned ArgNo, Instruction &IP) { 6785 assert(PrivType && "Expected privatizable type!"); 6786 6787 IRBuilder<NoFolder> IRB(&IP); 6788 const DataLayout &DL = F.getParent()->getDataLayout(); 6789 6790 // Traverse the type, build GEPs and stores. 6791 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6792 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6793 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6794 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6795 Value *Ptr = 6796 constructPointer(PointeeTy, PrivType, &Base, 6797 PrivStructLayout->getElementOffset(u), IRB, DL); 6798 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6799 } 6800 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6801 Type *PointeeTy = PrivArrayType->getElementType(); 6802 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6803 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6804 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6805 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6806 u * PointeeTySize, IRB, DL); 6807 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6808 } 6809 } else { 6810 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6811 } 6812 } 6813 6814 /// Extract values from \p Base according to the type \p PrivType at the 6815 /// call position \p ACS. The values are appended to \p ReplacementValues. 6816 void createReplacementValues(Align Alignment, Type *PrivType, 6817 AbstractCallSite ACS, Value *Base, 6818 SmallVectorImpl<Value *> &ReplacementValues) { 6819 assert(Base && "Expected base value!"); 6820 assert(PrivType && "Expected privatizable type!"); 6821 Instruction *IP = ACS.getInstruction(); 6822 6823 IRBuilder<NoFolder> IRB(IP); 6824 const DataLayout &DL = IP->getModule()->getDataLayout(); 6825 6826 Type *PrivPtrType = PrivType->getPointerTo(); 6827 if (Base->getType() != PrivPtrType) 6828 Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6829 Base, PrivPtrType, "", ACS.getInstruction()); 6830 6831 // Traverse the type, build GEPs and loads. 6832 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6833 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6834 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6835 Type *PointeeTy = PrivStructType->getElementType(u); 6836 Value *Ptr = 6837 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6838 PrivStructLayout->getElementOffset(u), IRB, DL); 6839 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6840 L->setAlignment(Alignment); 6841 ReplacementValues.push_back(L); 6842 } 6843 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6844 Type *PointeeTy = PrivArrayType->getElementType(); 6845 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6846 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6847 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6848 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6849 u * PointeeTySize, IRB, DL); 6850 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6851 L->setAlignment(Alignment); 6852 ReplacementValues.push_back(L); 6853 } 6854 } else { 6855 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6856 L->setAlignment(Alignment); 6857 ReplacementValues.push_back(L); 6858 } 6859 } 6860 6861 /// See AbstractAttribute::manifest(...) 
6862 ChangeStatus manifest(Attributor &A) override { 6863 if (!PrivatizableType.hasValue()) 6864 return ChangeStatus::UNCHANGED; 6865 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6866 6867 // Collect all tail calls in the function as we cannot allow new allocas to 6868 // escape into tail recursion. 6869 // TODO: Be smarter about new allocas escaping into tail calls. 6870 SmallVector<CallInst *, 16> TailCalls; 6871 bool UsedAssumedInformation = false; 6872 if (!A.checkForAllInstructions( 6873 [&](Instruction &I) { 6874 CallInst &CI = cast<CallInst>(I); 6875 if (CI.isTailCall()) 6876 TailCalls.push_back(&CI); 6877 return true; 6878 }, 6879 *this, {Instruction::Call}, UsedAssumedInformation)) 6880 return ChangeStatus::UNCHANGED; 6881 6882 Argument *Arg = getAssociatedArgument(); 6883 // Query AAAlign attribute for alignment of associated argument to 6884 // determine the best alignment of loads. 6885 const auto &AlignAA = 6886 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6887 6888 // Callback to repair the associated function. A new alloca is placed at the 6889 // beginning and initialized with the values passed through arguments. The 6890 // new alloca replaces the use of the old pointer argument. 6891 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6892 [=](const Attributor::ArgumentReplacementInfo &ARI, 6893 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6894 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6895 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6896 const DataLayout &DL = IP->getModule()->getDataLayout(); 6897 unsigned AS = DL.getAllocaAddrSpace(); 6898 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS, 6899 Arg->getName() + ".priv", IP); 6900 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6901 ArgIt->getArgNo(), *IP); 6902 6903 if (AI->getType() != Arg->getType()) 6904 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6905 AI, Arg->getType(), "", IP); 6906 Arg->replaceAllUsesWith(AI); 6907 6908 for (CallInst *CI : TailCalls) 6909 CI->setTailCall(false); 6910 }; 6911 6912 // Callback to repair a call site of the associated function. The elements 6913 // of the privatizable type are loaded prior to the call and passed to the 6914 // new function version. 6915 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6916 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6917 AbstractCallSite ACS, 6918 SmallVectorImpl<Value *> &NewArgOperands) { 6919 // When no alignment is specified for the load instruction, 6920 // natural alignment is assumed. 6921 createReplacementValues( 6922 assumeAligned(AlignAA.getAssumedAlign()), 6923 PrivatizableType.getValue(), ACS, 6924 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6925 NewArgOperands); 6926 }; 6927 6928 // Collect the types that will replace the privatizable type in the function 6929 // signature. 6930 SmallVector<Type *, 16> ReplacementTypes; 6931 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6932 6933 // Register a rewrite of the argument. 
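// Conceptually (illustrative example with hypothetical names, not taken from
// the source) the rewrite turns
//   define void @fn(%struct.S* byval(%struct.S) %arg)
// into
//   define void @fn(i32 %arg.0, float %arg.1)
// where FnRepairCB recreates the argument as a private alloca in the entry
// block of the new function and ACSRepairCB loads the individual elements at
// every call site and passes them as the new operands.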
6934 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6935 std::move(FnRepairCB), 6936 std::move(ACSRepairCB))) 6937 return ChangeStatus::CHANGED; 6938 return ChangeStatus::UNCHANGED; 6939 } 6940 6941 /// See AbstractAttribute::trackStatistics() 6942 void trackStatistics() const override { 6943 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6944 } 6945 }; 6946 6947 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6948 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6949 : AAPrivatizablePtrImpl(IRP, A) {} 6950 6951 /// See AbstractAttribute::initialize(...). 6952 virtual void initialize(Attributor &A) override { 6953 // TODO: We can privatize more than arguments. 6954 indicatePessimisticFixpoint(); 6955 } 6956 6957 ChangeStatus updateImpl(Attributor &A) override { 6958 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6959 "updateImpl will not be called"); 6960 } 6961 6962 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6963 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6964 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6965 if (!Obj) { 6966 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6967 return nullptr; 6968 } 6969 6970 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6971 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6972 if (CI->isOne()) 6973 return AI->getAllocatedType(); 6974 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6975 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6976 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6977 if (PrivArgAA.isAssumedPrivatizablePtr()) 6978 return PrivArgAA.getPrivatizableType(); 6979 } 6980 6981 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6982 "alloca nor privatizable argument: " 6983 << *Obj << "!\n"); 6984 return nullptr; 6985 } 6986 6987 /// See AbstractAttribute::trackStatistics() 6988 void trackStatistics() const override { 6989 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6990 } 6991 }; 6992 6993 struct AAPrivatizablePtrCallSiteArgument final 6994 : public AAPrivatizablePtrFloating { 6995 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6996 : AAPrivatizablePtrFloating(IRP, A) {} 6997 6998 /// See AbstractAttribute::initialize(...). 6999 void initialize(Attributor &A) override { 7000 if (getIRPosition().hasAttr(Attribute::ByVal)) 7001 indicateOptimisticFixpoint(); 7002 } 7003 7004 /// See AbstractAttribute::updateImpl(...). 
7005 ChangeStatus updateImpl(Attributor &A) override { 7006 PrivatizableType = identifyPrivatizableType(A); 7007 if (!PrivatizableType.hasValue()) 7008 return ChangeStatus::UNCHANGED; 7009 if (!PrivatizableType.getValue()) 7010 return indicatePessimisticFixpoint(); 7011 7012 const IRPosition &IRP = getIRPosition(); 7013 auto &NoCaptureAA = 7014 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 7015 if (!NoCaptureAA.isAssumedNoCapture()) { 7016 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 7017 return indicatePessimisticFixpoint(); 7018 } 7019 7020 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 7021 if (!NoAliasAA.isAssumedNoAlias()) { 7022 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 7023 return indicatePessimisticFixpoint(); 7024 } 7025 7026 bool IsKnown; 7027 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) { 7028 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 7029 return indicatePessimisticFixpoint(); 7030 } 7031 7032 return ChangeStatus::UNCHANGED; 7033 } 7034 7035 /// See AbstractAttribute::trackStatistics() 7036 void trackStatistics() const override { 7037 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 7038 } 7039 }; 7040 7041 struct AAPrivatizablePtrCallSiteReturned final 7042 : public AAPrivatizablePtrFloating { 7043 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 7044 : AAPrivatizablePtrFloating(IRP, A) {} 7045 7046 /// See AbstractAttribute::initialize(...). 7047 void initialize(Attributor &A) override { 7048 // TODO: We can privatize more than arguments. 7049 indicatePessimisticFixpoint(); 7050 } 7051 7052 /// See AbstractAttribute::trackStatistics() 7053 void trackStatistics() const override { 7054 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 7055 } 7056 }; 7057 7058 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 7059 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 7060 : AAPrivatizablePtrFloating(IRP, A) {} 7061 7062 /// See AbstractAttribute::initialize(...). 7063 void initialize(Attributor &A) override { 7064 // TODO: We can privatize more than arguments. 7065 indicatePessimisticFixpoint(); 7066 } 7067 7068 /// See AbstractAttribute::trackStatistics() 7069 void trackStatistics() const override { 7070 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 7071 } 7072 }; 7073 } // namespace 7074 7075 /// -------------------- Memory Behavior Attributes ---------------------------- 7076 /// Includes read-none, read-only, and write-only. 7077 /// ---------------------------------------------------------------------------- 7078 namespace { 7079 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 7080 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 7081 : AAMemoryBehavior(IRP, A) {} 7082 7083 /// See AbstractAttribute::initialize(...). 7084 void initialize(Attributor &A) override { 7085 intersectAssumedBits(BEST_STATE); 7086 getKnownStateFromValue(getIRPosition(), getState()); 7087 AAMemoryBehavior::initialize(A); 7088 } 7089 7090 /// Return the memory behavior information encoded in the IR for \p IRP. 
7091 static void getKnownStateFromValue(const IRPosition &IRP, 7092 BitIntegerState &State, 7093 bool IgnoreSubsumingPositions = false) { 7094 SmallVector<Attribute, 2> Attrs; 7095 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7096 for (const Attribute &Attr : Attrs) { 7097 switch (Attr.getKindAsEnum()) { 7098 case Attribute::ReadNone: 7099 State.addKnownBits(NO_ACCESSES); 7100 break; 7101 case Attribute::ReadOnly: 7102 State.addKnownBits(NO_WRITES); 7103 break; 7104 case Attribute::WriteOnly: 7105 State.addKnownBits(NO_READS); 7106 break; 7107 default: 7108 llvm_unreachable("Unexpected attribute!"); 7109 } 7110 } 7111 7112 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 7113 if (!I->mayReadFromMemory()) 7114 State.addKnownBits(NO_READS); 7115 if (!I->mayWriteToMemory()) 7116 State.addKnownBits(NO_WRITES); 7117 } 7118 } 7119 7120 /// See AbstractAttribute::getDeducedAttributes(...). 7121 void getDeducedAttributes(LLVMContext &Ctx, 7122 SmallVectorImpl<Attribute> &Attrs) const override { 7123 assert(Attrs.size() == 0); 7124 if (isAssumedReadNone()) 7125 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7126 else if (isAssumedReadOnly()) 7127 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 7128 else if (isAssumedWriteOnly()) 7129 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 7130 assert(Attrs.size() <= 1); 7131 } 7132 7133 /// See AbstractAttribute::manifest(...). 7134 ChangeStatus manifest(Attributor &A) override { 7135 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 7136 return ChangeStatus::UNCHANGED; 7137 7138 const IRPosition &IRP = getIRPosition(); 7139 7140 // Check if we would improve the existing attributes first. 7141 SmallVector<Attribute, 4> DeducedAttrs; 7142 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7143 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7144 return IRP.hasAttr(Attr.getKindAsEnum(), 7145 /* IgnoreSubsumingPositions */ true); 7146 })) 7147 return ChangeStatus::UNCHANGED; 7148 7149 // Clear existing attributes. 7150 IRP.removeAttrs(AttrKinds); 7151 7152 // Use the generic manifest method. 7153 return IRAttribute::manifest(A); 7154 } 7155 7156 /// See AbstractState::getAsStr(). 7157 const std::string getAsStr() const override { 7158 if (isAssumedReadNone()) 7159 return "readnone"; 7160 if (isAssumedReadOnly()) 7161 return "readonly"; 7162 if (isAssumedWriteOnly()) 7163 return "writeonly"; 7164 return "may-read/write"; 7165 } 7166 7167 /// The set of IR attributes AAMemoryBehavior deals with. 7168 static const Attribute::AttrKind AttrKinds[3]; 7169 }; 7170 7171 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7172 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7173 7174 /// Memory behavior attribute for a floating value. 7175 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7176 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7177 : AAMemoryBehaviorImpl(IRP, A) {} 7178 7179 /// See AbstractAttribute::updateImpl(...). 
7180 ChangeStatus updateImpl(Attributor &A) override; 7181 7182 /// See AbstractAttribute::trackStatistics() 7183 void trackStatistics() const override { 7184 if (isAssumedReadNone()) 7185 STATS_DECLTRACK_FLOATING_ATTR(readnone) 7186 else if (isAssumedReadOnly()) 7187 STATS_DECLTRACK_FLOATING_ATTR(readonly) 7188 else if (isAssumedWriteOnly()) 7189 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 7190 } 7191 7192 private: 7193 /// Return true if users of \p UserI might access the underlying 7194 /// variable/location described by \p U and should therefore be analyzed. 7195 bool followUsersOfUseIn(Attributor &A, const Use &U, 7196 const Instruction *UserI); 7197 7198 /// Update the state according to the effect of use \p U in \p UserI. 7199 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI); 7200 }; 7201 7202 /// Memory behavior attribute for function argument. 7203 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 7204 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 7205 : AAMemoryBehaviorFloating(IRP, A) {} 7206 7207 /// See AbstractAttribute::initialize(...). 7208 void initialize(Attributor &A) override { 7209 intersectAssumedBits(BEST_STATE); 7210 const IRPosition &IRP = getIRPosition(); 7211 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 7212 // can query it when we use has/getAttr. That would allow us to reuse the 7213 // initialize of the base class here. 7214 bool HasByVal = 7215 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 7216 getKnownStateFromValue(IRP, getState(), 7217 /* IgnoreSubsumingPositions */ HasByVal); 7218 7219 // Initialize the use vector with all direct uses of the associated value. 7220 Argument *Arg = getAssociatedArgument(); 7221 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) 7222 indicatePessimisticFixpoint(); 7223 } 7224 7225 ChangeStatus manifest(Attributor &A) override { 7226 // TODO: Pointer arguments are not supported on vectors of pointers yet. 7227 if (!getAssociatedValue().getType()->isPointerTy()) 7228 return ChangeStatus::UNCHANGED; 7229 7230 // TODO: From readattrs.ll: "inalloca parameters are always 7231 // considered written" 7232 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 7233 removeKnownBits(NO_WRITES); 7234 removeAssumedBits(NO_WRITES); 7235 } 7236 return AAMemoryBehaviorFloating::manifest(A); 7237 } 7238 7239 /// See AbstractAttribute::trackStatistics() 7240 void trackStatistics() const override { 7241 if (isAssumedReadNone()) 7242 STATS_DECLTRACK_ARG_ATTR(readnone) 7243 else if (isAssumedReadOnly()) 7244 STATS_DECLTRACK_ARG_ATTR(readonly) 7245 else if (isAssumedWriteOnly()) 7246 STATS_DECLTRACK_ARG_ATTR(writeonly) 7247 } 7248 }; 7249 7250 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 7251 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 7252 : AAMemoryBehaviorArgument(IRP, A) {} 7253 7254 /// See AbstractAttribute::initialize(...). 7255 void initialize(Attributor &A) override { 7256 // If we don't have an associated attribute this is either a variadic call 7257 // or an indirect call, either way, nothing to do here. 
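// Note (added for clarity, stating the assumed intent): byval call site
// arguments are handled specially below because the callee operates on a
// private copy, so callee writes never reach the caller's memory; at this
// position the argument can at most be read.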
7258 Argument *Arg = getAssociatedArgument(); 7259 if (!Arg) { 7260 indicatePessimisticFixpoint(); 7261 return; 7262 } 7263 if (Arg->hasByValAttr()) { 7264 addKnownBits(NO_WRITES); 7265 removeKnownBits(NO_READS); 7266 removeAssumedBits(NO_READS); 7267 } 7268 AAMemoryBehaviorArgument::initialize(A); 7269 if (getAssociatedFunction()->isDeclaration()) 7270 indicatePessimisticFixpoint(); 7271 } 7272 7273 /// See AbstractAttribute::updateImpl(...). 7274 ChangeStatus updateImpl(Attributor &A) override { 7275 // TODO: Once we have call site specific value information we can provide 7276 // call site specific liveness liveness information and then it makes 7277 // sense to specialize attributes for call sites arguments instead of 7278 // redirecting requests to the callee argument. 7279 Argument *Arg = getAssociatedArgument(); 7280 const IRPosition &ArgPos = IRPosition::argument(*Arg); 7281 auto &ArgAA = 7282 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED); 7283 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 7284 } 7285 7286 /// See AbstractAttribute::trackStatistics() 7287 void trackStatistics() const override { 7288 if (isAssumedReadNone()) 7289 STATS_DECLTRACK_CSARG_ATTR(readnone) 7290 else if (isAssumedReadOnly()) 7291 STATS_DECLTRACK_CSARG_ATTR(readonly) 7292 else if (isAssumedWriteOnly()) 7293 STATS_DECLTRACK_CSARG_ATTR(writeonly) 7294 } 7295 }; 7296 7297 /// Memory behavior attribute for a call site return position. 7298 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating { 7299 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A) 7300 : AAMemoryBehaviorFloating(IRP, A) {} 7301 7302 /// See AbstractAttribute::initialize(...). 7303 void initialize(Attributor &A) override { 7304 AAMemoryBehaviorImpl::initialize(A); 7305 Function *F = getAssociatedFunction(); 7306 if (!F || F->isDeclaration()) 7307 indicatePessimisticFixpoint(); 7308 } 7309 7310 /// See AbstractAttribute::manifest(...). 7311 ChangeStatus manifest(Attributor &A) override { 7312 // We do not annotate returned values. 7313 return ChangeStatus::UNCHANGED; 7314 } 7315 7316 /// See AbstractAttribute::trackStatistics() 7317 void trackStatistics() const override {} 7318 }; 7319 7320 /// An AA to represent the memory behavior function attributes. 7321 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl { 7322 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A) 7323 : AAMemoryBehaviorImpl(IRP, A) {} 7324 7325 /// See AbstractAttribute::updateImpl(Attributor &A). 7326 virtual ChangeStatus updateImpl(Attributor &A) override; 7327 7328 /// See AbstractAttribute::manifest(...). 7329 ChangeStatus manifest(Attributor &A) override { 7330 Function &F = cast<Function>(getAnchorValue()); 7331 if (isAssumedReadNone()) { 7332 F.removeFnAttr(Attribute::ArgMemOnly); 7333 F.removeFnAttr(Attribute::InaccessibleMemOnly); 7334 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly); 7335 } 7336 return AAMemoryBehaviorImpl::manifest(A); 7337 } 7338 7339 /// See AbstractAttribute::trackStatistics() 7340 void trackStatistics() const override { 7341 if (isAssumedReadNone()) 7342 STATS_DECLTRACK_FN_ATTR(readnone) 7343 else if (isAssumedReadOnly()) 7344 STATS_DECLTRACK_FN_ATTR(readonly) 7345 else if (isAssumedWriteOnly()) 7346 STATS_DECLTRACK_FN_ATTR(writeonly) 7347 } 7348 }; 7349 7350 /// AAMemoryBehavior attribute for call sites. 
7351 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7352 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7353 : AAMemoryBehaviorImpl(IRP, A) {}
7354
7355 /// See AbstractAttribute::initialize(...).
7356 void initialize(Attributor &A) override {
7357 AAMemoryBehaviorImpl::initialize(A);
7358 Function *F = getAssociatedFunction();
7359 if (!F || F->isDeclaration())
7360 indicatePessimisticFixpoint();
7361 }
7362
7363 /// See AbstractAttribute::updateImpl(...).
7364 ChangeStatus updateImpl(Attributor &A) override {
7365 // TODO: Once we have call site specific value information we can provide
7366 // call site specific liveness information and then it makes
7367 // sense to specialize attributes for call site arguments instead of
7368 // redirecting requests to the callee argument.
7369 Function *F = getAssociatedFunction();
7370 const IRPosition &FnPos = IRPosition::function(*F);
7371 auto &FnAA =
7372 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7373 return clampStateAndIndicateChange(getState(), FnAA.getState());
7374 }
7375
7376 /// See AbstractAttribute::trackStatistics()
7377 void trackStatistics() const override {
7378 if (isAssumedReadNone())
7379 STATS_DECLTRACK_CS_ATTR(readnone)
7380 else if (isAssumedReadOnly())
7381 STATS_DECLTRACK_CS_ATTR(readonly)
7382 else if (isAssumedWriteOnly())
7383 STATS_DECLTRACK_CS_ATTR(writeonly)
7384 }
7385 };
7386
7387 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7388
7389 // The current assumed state used to determine a change.
7390 auto AssumedState = getAssumed();
7391
7392 auto CheckRWInst = [&](Instruction &I) {
7393 // If the instruction has its own memory behavior state, use it to restrict
7394 // the local state. No further analysis is required as the other memory
7395 // state is as optimistic as it gets.
7396 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7397 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7398 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7399 intersectAssumedBits(MemBehaviorAA.getAssumed());
7400 return !isAtFixpoint();
7401 }
7402
7403 // Remove access kind modifiers if necessary.
7404 if (I.mayReadFromMemory())
7405 removeAssumedBits(NO_READS);
7406 if (I.mayWriteToMemory())
7407 removeAssumedBits(NO_WRITES);
7408 return !isAtFixpoint();
7409 };
7410
7411 bool UsedAssumedInformation = false;
7412 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7413 UsedAssumedInformation))
7414 return indicatePessimisticFixpoint();
7415
7416 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7417 : ChangeStatus::UNCHANGED;
7418 }
7419
7420 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7421
7422 const IRPosition &IRP = getIRPosition();
7423 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7424 AAMemoryBehavior::StateType &S = getState();
7425
7426 // First, check the function scope. We take the known information and we avoid
7427 // work if the assumed information implies the current assumed information for
7428 // this attribute. This is valid for all but byval arguments.
7429 Argument *Arg = IRP.getAssociatedArgument();
7430 AAMemoryBehavior::base_t FnMemAssumedState =
7431 AAMemoryBehavior::StateType::getWorstState();
7432 if (!Arg || !Arg->hasByValAttr()) {
7433 const auto &FnMemAA =
7434 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7435 FnMemAssumedState = FnMemAA.getAssumed();
7436 S.addKnownBits(FnMemAA.getKnown());
7437 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7438 return ChangeStatus::UNCHANGED;
7439 }
7440
7441 // The current assumed state used to determine a change.
7442 auto AssumedState = S.getAssumed();
7443
7444 // Make sure the value is not captured (except through "return"); if
7445 // it is, any information derived would be irrelevant anyway as we cannot
7446 // check the potential aliases introduced by the capture. However, no need
7447 // to fall back to anything less optimistic than the function state.
7448 const auto &ArgNoCaptureAA =
7449 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7450 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7451 S.intersectAssumedBits(FnMemAssumedState);
7452 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7453 : ChangeStatus::UNCHANGED;
7454 }
7455
7456 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7457 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7458 Instruction *UserI = cast<Instruction>(U.getUser());
7459 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7460 << " \n");
7461
7462 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7463 if (UserI->isDroppable())
7464 return true;
7465
7466 // Check if the users of UserI should also be visited.
7467 Follow = followUsersOfUseIn(A, U, UserI);
7468
7469 // If UserI might touch memory we analyze the use in detail.
7470 if (UserI->mayReadOrWriteMemory())
7471 analyzeUseIn(A, U, UserI);
7472
7473 return !isAtFixpoint();
7474 };
7475
7476 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7477 return indicatePessimisticFixpoint();
7478
7479 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7480 : ChangeStatus::UNCHANGED;
7481 }
7482
7483 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7484 const Instruction *UserI) {
7485 // The loaded value is unrelated to the pointer argument, no need to
7486 // follow the users of the load.
7487 if (isa<LoadInst>(UserI))
7488 return false;
7489
7490 // By default we follow all uses assuming UserI might leak information on U,
7491 // we have special handling for call site operands though.
7492 const auto *CB = dyn_cast<CallBase>(UserI);
7493 if (!CB || !CB->isArgOperand(&U))
7494 return true;
7495
7496 // If the use is a call argument known not to be captured, the users of
7497 // the call do not need to be visited because they have to be unrelated to
7498 // the input. Note that this check is not trivial even though we disallow
7499 // general capturing of the underlying argument. The reason is that the
7500 // call might capture the argument "through return", which we allow and for
7501 // which we need to check call users.
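// Illustrative example (an assumption, not taken from the source): for
//   %q = call i8* @passthrough(i8* %p)
// the argument may be "captured" merely by being returned; the users of %q
// can still access the underlying memory, so they have to be visited too.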
7502 if (U.get()->getType()->isPointerTy()) { 7503 unsigned ArgNo = CB->getArgOperandNo(&U); 7504 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 7505 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); 7506 return !ArgNoCaptureAA.isAssumedNoCapture(); 7507 } 7508 7509 return true; 7510 } 7511 7512 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, 7513 const Instruction *UserI) { 7514 assert(UserI->mayReadOrWriteMemory()); 7515 7516 switch (UserI->getOpcode()) { 7517 default: 7518 // TODO: Handle all atomics and other side-effect operations we know of. 7519 break; 7520 case Instruction::Load: 7521 // Loads cause the NO_READS property to disappear. 7522 removeAssumedBits(NO_READS); 7523 return; 7524 7525 case Instruction::Store: 7526 // Stores cause the NO_WRITES property to disappear if the use is the 7527 // pointer operand. Note that while capturing was taken care of somewhere 7528 // else we need to deal with stores of the value that is not looked through. 7529 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) 7530 removeAssumedBits(NO_WRITES); 7531 else 7532 indicatePessimisticFixpoint(); 7533 return; 7534 7535 case Instruction::Call: 7536 case Instruction::CallBr: 7537 case Instruction::Invoke: { 7538 // For call sites we look at the argument memory behavior attribute (this 7539 // could be recursive!) in order to restrict our own state. 7540 const auto *CB = cast<CallBase>(UserI); 7541 7542 // Give up on operand bundles. 7543 if (CB->isBundleOperand(&U)) { 7544 indicatePessimisticFixpoint(); 7545 return; 7546 } 7547 7548 // Calling a function does read the function pointer, maybe write it if the 7549 // function is self-modifying. 7550 if (CB->isCallee(&U)) { 7551 removeAssumedBits(NO_READS); 7552 break; 7553 } 7554 7555 // Adjust the possible access behavior based on the information on the 7556 // argument. 7557 IRPosition Pos; 7558 if (U.get()->getType()->isPointerTy()) 7559 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 7560 else 7561 Pos = IRPosition::callsite_function(*CB); 7562 const auto &MemBehaviorAA = 7563 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); 7564 // "assumed" has at most the same bits as the MemBehaviorAA assumed 7565 // and at least "known". 7566 intersectAssumedBits(MemBehaviorAA.getAssumed()); 7567 return; 7568 } 7569 }; 7570 7571 // Generally, look at the "may-properties" and adjust the assumed state if we 7572 // did not trigger special handling before. 
7573 if (UserI->mayReadFromMemory()) 7574 removeAssumedBits(NO_READS); 7575 if (UserI->mayWriteToMemory()) 7576 removeAssumedBits(NO_WRITES); 7577 } 7578 } // namespace 7579 7580 /// -------------------- Memory Locations Attributes --------------------------- 7581 /// Includes read-none, argmemonly, inaccessiblememonly, 7582 /// inaccessiblememorargmemonly 7583 /// ---------------------------------------------------------------------------- 7584 7585 std::string AAMemoryLocation::getMemoryLocationsAsStr( 7586 AAMemoryLocation::MemoryLocationsKind MLK) { 7587 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 7588 return "all memory"; 7589 if (MLK == AAMemoryLocation::NO_LOCATIONS) 7590 return "no memory"; 7591 std::string S = "memory:"; 7592 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 7593 S += "stack,"; 7594 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 7595 S += "constant,"; 7596 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 7597 S += "internal global,"; 7598 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 7599 S += "external global,"; 7600 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 7601 S += "argument,"; 7602 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 7603 S += "inaccessible,"; 7604 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 7605 S += "malloced,"; 7606 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 7607 S += "unknown,"; 7608 S.pop_back(); 7609 return S; 7610 } 7611 7612 namespace { 7613 struct AAMemoryLocationImpl : public AAMemoryLocation { 7614 7615 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 7616 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 7617 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7618 AccessKind2Accesses[u] = nullptr; 7619 } 7620 7621 ~AAMemoryLocationImpl() { 7622 // The AccessSets are allocated via a BumpPtrAllocator, we call 7623 // the destructor manually. 7624 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7625 if (AccessKind2Accesses[u]) 7626 AccessKind2Accesses[u]->~AccessSet(); 7627 } 7628 7629 /// See AbstractAttribute::initialize(...). 7630 void initialize(Attributor &A) override { 7631 intersectAssumedBits(BEST_STATE); 7632 getKnownStateFromValue(A, getIRPosition(), getState()); 7633 AAMemoryLocation::initialize(A); 7634 } 7635 7636 /// Return the memory behavior information encoded in the IR for \p IRP. 7637 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 7638 BitIntegerState &State, 7639 bool IgnoreSubsumingPositions = false) { 7640 // For internal functions we ignore `argmemonly` and 7641 // `inaccessiblememorargmemonly` as we might break it via interprocedural 7642 // constant propagation. It is unclear if this is the best way but it is 7643 // unlikely this will cause real performance problems. If we are deriving 7644 // attributes for the anchor function we even remove the attribute in 7645 // addition to ignoring it. 
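// Illustrative example of the concern (an assumption, not taken from the
// source): if every caller of an internal function passes the global @G for
// a pointer argument, the Attributor may propagate @G into the function;
// accesses that used to be "argument memory" then touch global memory and a
// previously placed `argmemonly` would no longer hold.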
7646 bool UseArgMemOnly = true; 7647 Function *AnchorFn = IRP.getAnchorScope(); 7648 if (AnchorFn && A.isRunOn(*AnchorFn)) 7649 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7650 7651 SmallVector<Attribute, 2> Attrs; 7652 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7653 for (const Attribute &Attr : Attrs) { 7654 switch (Attr.getKindAsEnum()) { 7655 case Attribute::ReadNone: 7656 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7657 break; 7658 case Attribute::InaccessibleMemOnly: 7659 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7660 break; 7661 case Attribute::ArgMemOnly: 7662 if (UseArgMemOnly) 7663 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7664 else 7665 IRP.removeAttrs({Attribute::ArgMemOnly}); 7666 break; 7667 case Attribute::InaccessibleMemOrArgMemOnly: 7668 if (UseArgMemOnly) 7669 State.addKnownBits(inverseLocation( 7670 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7671 else 7672 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7673 break; 7674 default: 7675 llvm_unreachable("Unexpected attribute!"); 7676 } 7677 } 7678 } 7679 7680 /// See AbstractAttribute::getDeducedAttributes(...). 7681 void getDeducedAttributes(LLVMContext &Ctx, 7682 SmallVectorImpl<Attribute> &Attrs) const override { 7683 assert(Attrs.size() == 0); 7684 if (isAssumedReadNone()) { 7685 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7686 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7687 if (isAssumedInaccessibleMemOnly()) 7688 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7689 else if (isAssumedArgMemOnly()) 7690 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7691 else if (isAssumedInaccessibleOrArgMemOnly()) 7692 Attrs.push_back( 7693 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7694 } 7695 assert(Attrs.size() <= 1); 7696 } 7697 7698 /// See AbstractAttribute::manifest(...). 7699 ChangeStatus manifest(Attributor &A) override { 7700 const IRPosition &IRP = getIRPosition(); 7701 7702 // Check if we would improve the existing attributes first. 7703 SmallVector<Attribute, 4> DeducedAttrs; 7704 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7705 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7706 return IRP.hasAttr(Attr.getKindAsEnum(), 7707 /* IgnoreSubsumingPositions */ true); 7708 })) 7709 return ChangeStatus::UNCHANGED; 7710 7711 // Clear existing attributes. 7712 IRP.removeAttrs(AttrKinds); 7713 if (isAssumedReadNone()) 7714 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7715 7716 // Use the generic manifest method. 7717 return IRAttribute::manifest(A); 7718 } 7719 7720 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
7721 bool checkForAllAccessesToMemoryKind( 7722 function_ref<bool(const Instruction *, const Value *, AccessKind, 7723 MemoryLocationsKind)> 7724 Pred, 7725 MemoryLocationsKind RequestedMLK) const override { 7726 if (!isValidState()) 7727 return false; 7728 7729 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 7730 if (AssumedMLK == NO_LOCATIONS) 7731 return true; 7732 7733 unsigned Idx = 0; 7734 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 7735 CurMLK *= 2, ++Idx) { 7736 if (CurMLK & RequestedMLK) 7737 continue; 7738 7739 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 7740 for (const AccessInfo &AI : *Accesses) 7741 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 7742 return false; 7743 } 7744 7745 return true; 7746 } 7747 7748 ChangeStatus indicatePessimisticFixpoint() override { 7749 // If we give up and indicate a pessimistic fixpoint this instruction will 7750 // become an access for all potential access kinds: 7751 // TODO: Add pointers for argmemonly and globals to improve the results of 7752 // checkForAllAccessesToMemoryKind. 7753 bool Changed = false; 7754 MemoryLocationsKind KnownMLK = getKnown(); 7755 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 7756 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 7757 if (!(CurMLK & KnownMLK)) 7758 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 7759 getAccessKindFromInst(I)); 7760 return AAMemoryLocation::indicatePessimisticFixpoint(); 7761 } 7762 7763 protected: 7764 /// Helper struct to tie together an instruction that has a read or write 7765 /// effect with the pointer it accesses (if any). 7766 struct AccessInfo { 7767 7768 /// The instruction that caused the access. 7769 const Instruction *I; 7770 7771 /// The base pointer that is accessed, or null if unknown. 7772 const Value *Ptr; 7773 7774 /// The kind of access (read/write/read+write). 7775 AccessKind Kind; 7776 7777 bool operator==(const AccessInfo &RHS) const { 7778 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 7779 } 7780 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 7781 if (LHS.I != RHS.I) 7782 return LHS.I < RHS.I; 7783 if (LHS.Ptr != RHS.Ptr) 7784 return LHS.Ptr < RHS.Ptr; 7785 if (LHS.Kind != RHS.Kind) 7786 return LHS.Kind < RHS.Kind; 7787 return false; 7788 } 7789 }; 7790 7791 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 7792 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 7793 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 7794 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 7795 7796 /// Categorize the pointer arguments of CB that might access memory in 7797 /// AccessedLoc and update the state and access map accordingly. 7798 void 7799 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB, 7800 AAMemoryLocation::StateType &AccessedLocs, 7801 bool &Changed); 7802 7803 /// Return the kind(s) of location that may be accessed by \p V. 7804 AAMemoryLocation::MemoryLocationsKind 7805 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 7806 7807 /// Return the access kind as determined by \p I. 7808 AccessKind getAccessKindFromInst(const Instruction *I) { 7809 AccessKind AK = READ_WRITE; 7810 if (I) { 7811 AK = I->mayReadFromMemory() ? READ : NONE; 7812 AK = AccessKind(AK | (I->mayWriteToMemory() ? 
                                                    WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  SmallVector<Value *, 8> Objects;
  bool UsedAssumedInformation = false;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
                                       UsedAssumedInformation,
                                       /* Intraprocedural */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  for (Value *Obj : Objects) {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies
      // performed on the call edge, though, we should. To make that happen we
      // need to teach various passes, e.g., DSE, about the copy effect of a
      // byval. That would also allow us to mark functions only accessing byval
      // arguments as readnone again, as arguably their accesses have no effect
      // outside of the function, like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we don't treat it as one either. Constants
      // defined by TBAA are similar. (We know we do not write it because it is
      // constant.)
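      // For example, a global such as
      //   @gc = internal constant i32 7
      // is skipped below because loads from it are not observable effects,
      // whereas accessing a non-constant global still clears the
      // corresponding NO_GLOBAL_*_MEM bit from the assumed state.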
7885 if (auto *GVar = dyn_cast<GlobalVariable>(GV)) 7886 if (GVar->isConstant()) 7887 continue; 7888 7889 if (GV->hasLocalLinkage()) 7890 MLK = NO_GLOBAL_INTERNAL_MEM; 7891 else 7892 MLK = NO_GLOBAL_EXTERNAL_MEM; 7893 } else if (isa<ConstantPointerNull>(Obj) && 7894 !NullPointerIsDefined(getAssociatedFunction(), 7895 Ptr.getType()->getPointerAddressSpace())) { 7896 continue; 7897 } else if (isa<AllocaInst>(Obj)) { 7898 MLK = NO_LOCAL_MEM; 7899 } else if (const auto *CB = dyn_cast<CallBase>(Obj)) { 7900 const auto &NoAliasAA = A.getAAFor<AANoAlias>( 7901 *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL); 7902 if (NoAliasAA.isAssumedNoAlias()) 7903 MLK = NO_MALLOCED_MEM; 7904 else 7905 MLK = NO_UNKOWN_MEM; 7906 } else { 7907 MLK = NO_UNKOWN_MEM; 7908 } 7909 7910 assert(MLK != NO_LOCATIONS && "No location specified!"); 7911 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: " 7912 << *Obj << " -> " << getMemoryLocationsAsStr(MLK) 7913 << "\n"); 7914 updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed, 7915 getAccessKindFromInst(&I)); 7916 } 7917 7918 LLVM_DEBUG( 7919 dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: " 7920 << getMemoryLocationsAsStr(State.getAssumed()) << "\n"); 7921 } 7922 7923 void AAMemoryLocationImpl::categorizeArgumentPointerLocations( 7924 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs, 7925 bool &Changed) { 7926 for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) { 7927 7928 // Skip non-pointer arguments. 7929 const Value *ArgOp = CB.getArgOperand(ArgNo); 7930 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 7931 continue; 7932 7933 // Skip readnone arguments. 7934 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo); 7935 const auto &ArgOpMemLocationAA = 7936 A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL); 7937 7938 if (ArgOpMemLocationAA.isAssumedReadNone()) 7939 continue; 7940 7941 // Categorize potentially accessed pointer arguments as if there was an 7942 // access instruction with them as pointer. 7943 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed); 7944 } 7945 } 7946 7947 AAMemoryLocation::MemoryLocationsKind 7948 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I, 7949 bool &Changed) { 7950 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for " 7951 << I << "\n"); 7952 7953 AAMemoryLocation::StateType AccessedLocs; 7954 AccessedLocs.intersectAssumedBits(NO_LOCATIONS); 7955 7956 if (auto *CB = dyn_cast<CallBase>(&I)) { 7957 7958 // First check if we assume any memory is access is visible. 7959 const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>( 7960 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 7961 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I 7962 << " [" << CBMemLocationAA << "]\n"); 7963 7964 if (CBMemLocationAA.isAssumedReadNone()) 7965 return NO_LOCATIONS; 7966 7967 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) { 7968 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr, 7969 Changed, getAccessKindFromInst(&I)); 7970 return AccessedLocs.getAssumed(); 7971 } 7972 7973 uint32_t CBAssumedNotAccessedLocs = 7974 CBMemLocationAA.getAssumedNotAccessedLocation(); 7975 7976 // Set the argmemonly and global bit as we handle them separately below. 
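    // Illustrative sketch of the masking below: OR-ing NO_ARGUMENT_MEM and
    // NO_GLOBAL_MEM into the "not accessed" set makes the generic loop treat
    // argument and global memory as untouched, so those two kinds are only
    // recorded by the dedicated handling further down, which can attach
    // concrete pointers instead of a nullptr access.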
7977 uint32_t CBAssumedNotAccessedLocsNoArgMem = 7978 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 7979 7980 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 7981 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 7982 continue; 7983 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 7984 getAccessKindFromInst(&I)); 7985 } 7986 7987 // Now handle global memory if it might be accessed. This is slightly tricky 7988 // as NO_GLOBAL_MEM has multiple bits set. 7989 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 7990 if (HasGlobalAccesses) { 7991 auto AccessPred = [&](const Instruction *, const Value *Ptr, 7992 AccessKind Kind, MemoryLocationsKind MLK) { 7993 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 7994 getAccessKindFromInst(&I)); 7995 return true; 7996 }; 7997 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 7998 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 7999 return AccessedLocs.getWorstState(); 8000 } 8001 8002 LLVM_DEBUG( 8003 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 8004 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 8005 8006 // Now handle argument memory if it might be accessed. 8007 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 8008 if (HasArgAccesses) 8009 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed); 8010 8011 LLVM_DEBUG( 8012 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 8013 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 8014 8015 return AccessedLocs.getAssumed(); 8016 } 8017 8018 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 8019 LLVM_DEBUG( 8020 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 8021 << I << " [" << *Ptr << "]\n"); 8022 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 8023 return AccessedLocs.getAssumed(); 8024 } 8025 8026 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 8027 << I << "\n"); 8028 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 8029 getAccessKindFromInst(&I)); 8030 return AccessedLocs.getAssumed(); 8031 } 8032 8033 /// An AA to represent the memory behavior function attributes. 8034 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 8035 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 8036 : AAMemoryLocationImpl(IRP, A) {} 8037 8038 /// See AbstractAttribute::updateImpl(Attributor &A). 8039 virtual ChangeStatus updateImpl(Attributor &A) override { 8040 8041 const auto &MemBehaviorAA = 8042 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 8043 if (MemBehaviorAA.isAssumedReadNone()) { 8044 if (MemBehaviorAA.isKnownReadNone()) 8045 return indicateOptimisticFixpoint(); 8046 assert(isAssumedReadNone() && 8047 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 8048 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 8049 return ChangeStatus::UNCHANGED; 8050 } 8051 8052 // The current assumed state used to determine a change. 
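    // Each read/write instruction visited below narrows this state; e.g., if
    // the only such instruction is a store to an alloca, only NO_LOCAL_MEM
    // ends up being removed from the assumed bits and all other location
    // kinds stay assumed-not-accessed.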
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // thus once we don't actually exclude any memory locations in the state.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                            UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryLocationImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    // call site specific liveness information and then it makes sense to
    // specialize attributes for call site arguments instead of redirecting
    // requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};
} // namespace

/// ------------------ Value Constant Range Attribute -------------------------

namespace {
struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
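  /// The known range is seeded here by intersecting it with what
  /// ScalarEvolution and LazyValueInfo can already prove about the associated
  /// value (unless a simplification callback forces a pessimistic fixpoint),
  /// so the fixpoint iteration only has to refine it further.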
8140 void initialize(Attributor &A) override { 8141 if (A.hasSimplificationCallback(getIRPosition())) { 8142 indicatePessimisticFixpoint(); 8143 return; 8144 } 8145 8146 // Intersect a range given by SCEV. 8147 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 8148 8149 // Intersect a range given by LVI. 8150 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 8151 } 8152 8153 /// See AbstractAttribute::getAsStr(). 8154 const std::string getAsStr() const override { 8155 std::string Str; 8156 llvm::raw_string_ostream OS(Str); 8157 OS << "range(" << getBitWidth() << ")<"; 8158 getKnown().print(OS); 8159 OS << " / "; 8160 getAssumed().print(OS); 8161 OS << ">"; 8162 return OS.str(); 8163 } 8164 8165 /// Helper function to get a SCEV expr for the associated value at program 8166 /// point \p I. 8167 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const { 8168 if (!getAnchorScope()) 8169 return nullptr; 8170 8171 ScalarEvolution *SE = 8172 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 8173 *getAnchorScope()); 8174 8175 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>( 8176 *getAnchorScope()); 8177 8178 if (!SE || !LI) 8179 return nullptr; 8180 8181 const SCEV *S = SE->getSCEV(&getAssociatedValue()); 8182 if (!I) 8183 return S; 8184 8185 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent())); 8186 } 8187 8188 /// Helper function to get a range from SCEV for the associated value at 8189 /// program point \p I. 8190 ConstantRange getConstantRangeFromSCEV(Attributor &A, 8191 const Instruction *I = nullptr) const { 8192 if (!getAnchorScope()) 8193 return getWorstState(getBitWidth()); 8194 8195 ScalarEvolution *SE = 8196 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 8197 *getAnchorScope()); 8198 8199 const SCEV *S = getSCEV(A, I); 8200 if (!SE || !S) 8201 return getWorstState(getBitWidth()); 8202 8203 return SE->getUnsignedRange(S); 8204 } 8205 8206 /// Helper function to get a range from LVI for the associated value at 8207 /// program point \p I. 8208 ConstantRange 8209 getConstantRangeFromLVI(Attributor &A, 8210 const Instruction *CtxI = nullptr) const { 8211 if (!getAnchorScope()) 8212 return getWorstState(getBitWidth()); 8213 8214 LazyValueInfo *LVI = 8215 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 8216 *getAnchorScope()); 8217 8218 if (!LVI || !CtxI) 8219 return getWorstState(getBitWidth()); 8220 return LVI->getConstantRange(&getAssociatedValue(), 8221 const_cast<Instruction *>(CtxI)); 8222 } 8223 8224 /// Return true if \p CtxI is valid for querying outside analyses. 8225 /// This basically makes sure we do not ask intra-procedural analysis 8226 /// about a context in the wrong function or a context that violates 8227 /// dominance assumptions they might have. The \p AllowAACtxI flag indicates 8228 /// if the original context of this AA is OK or should be considered invalid. 8229 bool isValidCtxInstructionForOutsideAnalysis(Attributor &A, 8230 const Instruction *CtxI, 8231 bool AllowAACtxI) const { 8232 if (!CtxI || (!AllowAACtxI && CtxI == getCtxI())) 8233 return false; 8234 8235 // Our context might be in a different function, neither intra-procedural 8236 // analysis (ScalarEvolution nor LazyValueInfo) can handle that. 8237 if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction())) 8238 return false; 8239 8240 // If the context is not dominated by the value there are paths to the 8241 // context that do not define the value. 
    // This cannot be handled by LazyValueInfo so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumption.
    // We may be able to bound a variable range via assumptions in
    // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
    // evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is a strict improvement over the range
  /// metadata \p KnownRanges that is already attached, if any.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in IR, we give up on annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range, we
    // can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata.
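  /// For illustration: an assumed range [0, 42) on an i32 load would roughly
  /// be manifested as
  ///   %v = load i32, i32* %p, !range !{i32 0, i32 42}
  /// provided it is a strict improvement over any !range already present.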
8322 static bool 8323 setRangeMetadataIfisBetterRange(Instruction *I, 8324 const ConstantRange &AssumedConstantRange) { 8325 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 8326 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 8327 if (!AssumedConstantRange.isEmptySet()) { 8328 I->setMetadata(LLVMContext::MD_range, 8329 getMDNodeForConstantRange(I->getType(), I->getContext(), 8330 AssumedConstantRange)); 8331 return true; 8332 } 8333 } 8334 return false; 8335 } 8336 8337 /// See AbstractAttribute::manifest() 8338 ChangeStatus manifest(Attributor &A) override { 8339 ChangeStatus Changed = ChangeStatus::UNCHANGED; 8340 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 8341 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 8342 8343 auto &V = getAssociatedValue(); 8344 if (!AssumedConstantRange.isEmptySet() && 8345 !AssumedConstantRange.isSingleElement()) { 8346 if (Instruction *I = dyn_cast<Instruction>(&V)) { 8347 assert(I == getCtxI() && "Should not annotate an instruction which is " 8348 "not the context instruction"); 8349 if (isa<CallInst>(I) || isa<LoadInst>(I)) 8350 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 8351 Changed = ChangeStatus::CHANGED; 8352 } 8353 } 8354 8355 return Changed; 8356 } 8357 }; 8358 8359 struct AAValueConstantRangeArgument final 8360 : AAArgumentFromCallSiteArguments< 8361 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8362 true /* BridgeCallBaseContext */> { 8363 using Base = AAArgumentFromCallSiteArguments< 8364 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8365 true /* BridgeCallBaseContext */>; 8366 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 8367 : Base(IRP, A) {} 8368 8369 /// See AbstractAttribute::initialize(..). 8370 void initialize(Attributor &A) override { 8371 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8372 indicatePessimisticFixpoint(); 8373 } else { 8374 Base::initialize(A); 8375 } 8376 } 8377 8378 /// See AbstractAttribute::trackStatistics() 8379 void trackStatistics() const override { 8380 STATS_DECLTRACK_ARG_ATTR(value_range) 8381 } 8382 }; 8383 8384 struct AAValueConstantRangeReturned 8385 : AAReturnedFromReturnedValues<AAValueConstantRange, 8386 AAValueConstantRangeImpl, 8387 AAValueConstantRangeImpl::StateType, 8388 /* PropogateCallBaseContext */ true> { 8389 using Base = 8390 AAReturnedFromReturnedValues<AAValueConstantRange, 8391 AAValueConstantRangeImpl, 8392 AAValueConstantRangeImpl::StateType, 8393 /* PropogateCallBaseContext */ true>; 8394 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 8395 : Base(IRP, A) {} 8396 8397 /// See AbstractAttribute::initialize(...). 8398 void initialize(Attributor &A) override {} 8399 8400 /// See AbstractAttribute::trackStatistics() 8401 void trackStatistics() const override { 8402 STATS_DECLTRACK_FNRET_ATTR(value_range) 8403 } 8404 }; 8405 8406 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 8407 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 8408 : AAValueConstantRangeImpl(IRP, A) {} 8409 8410 /// See AbstractAttribute::initialize(...). 
8411 void initialize(Attributor &A) override { 8412 AAValueConstantRangeImpl::initialize(A); 8413 if (isAtFixpoint()) 8414 return; 8415 8416 Value &V = getAssociatedValue(); 8417 8418 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8419 unionAssumed(ConstantRange(C->getValue())); 8420 indicateOptimisticFixpoint(); 8421 return; 8422 } 8423 8424 if (isa<UndefValue>(&V)) { 8425 // Collapse the undef state to 0. 8426 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 8427 indicateOptimisticFixpoint(); 8428 return; 8429 } 8430 8431 if (isa<CallBase>(&V)) 8432 return; 8433 8434 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 8435 return; 8436 8437 // If it is a load instruction with range metadata, use it. 8438 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 8439 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 8440 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8441 return; 8442 } 8443 8444 // We can work with PHI and select instruction as we traverse their operands 8445 // during update. 8446 if (isa<SelectInst>(V) || isa<PHINode>(V)) 8447 return; 8448 8449 // Otherwise we give up. 8450 indicatePessimisticFixpoint(); 8451 8452 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 8453 << getAssociatedValue() << "\n"); 8454 } 8455 8456 bool calculateBinaryOperator( 8457 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 8458 const Instruction *CtxI, 8459 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8460 Value *LHS = BinOp->getOperand(0); 8461 Value *RHS = BinOp->getOperand(1); 8462 8463 // Simplify the operands first. 8464 bool UsedAssumedInformation = false; 8465 const auto &SimplifiedLHS = 8466 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8467 *this, UsedAssumedInformation); 8468 if (!SimplifiedLHS.hasValue()) 8469 return true; 8470 if (!SimplifiedLHS.getValue()) 8471 return false; 8472 LHS = *SimplifiedLHS; 8473 8474 const auto &SimplifiedRHS = 8475 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8476 *this, UsedAssumedInformation); 8477 if (!SimplifiedRHS.hasValue()) 8478 return true; 8479 if (!SimplifiedRHS.getValue()) 8480 return false; 8481 RHS = *SimplifiedRHS; 8482 8483 // TODO: Allow non integers as well. 8484 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8485 return false; 8486 8487 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8488 *this, IRPosition::value(*LHS, getCallBaseContext()), 8489 DepClassTy::REQUIRED); 8490 QuerriedAAs.push_back(&LHSAA); 8491 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8492 8493 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8494 *this, IRPosition::value(*RHS, getCallBaseContext()), 8495 DepClassTy::REQUIRED); 8496 QuerriedAAs.push_back(&RHSAA); 8497 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8498 8499 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 8500 8501 T.unionAssumed(AssumedRange); 8502 8503 // TODO: Track a known state too. 8504 8505 return T.isValidState(); 8506 } 8507 8508 bool calculateCastInst( 8509 Attributor &A, CastInst *CastI, IntegerRangeState &T, 8510 const Instruction *CtxI, 8511 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8512 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 8513 // TODO: Allow non integers as well. 8514 Value *OpV = CastI->getOperand(0); 8515 8516 // Simplify the operand first. 
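    // Note on the Optional result below: if no simplified value is known yet
    // we optimistically return true and wait for another iteration; a null
    // result means the operand is unusable here and we give up on this cast;
    // otherwise we continue with the simplified operand.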
8517 bool UsedAssumedInformation = false; 8518 const auto &SimplifiedOpV = 8519 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()), 8520 *this, UsedAssumedInformation); 8521 if (!SimplifiedOpV.hasValue()) 8522 return true; 8523 if (!SimplifiedOpV.getValue()) 8524 return false; 8525 OpV = *SimplifiedOpV; 8526 8527 if (!OpV->getType()->isIntegerTy()) 8528 return false; 8529 8530 auto &OpAA = A.getAAFor<AAValueConstantRange>( 8531 *this, IRPosition::value(*OpV, getCallBaseContext()), 8532 DepClassTy::REQUIRED); 8533 QuerriedAAs.push_back(&OpAA); 8534 T.unionAssumed( 8535 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 8536 return T.isValidState(); 8537 } 8538 8539 bool 8540 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 8541 const Instruction *CtxI, 8542 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8543 Value *LHS = CmpI->getOperand(0); 8544 Value *RHS = CmpI->getOperand(1); 8545 8546 // Simplify the operands first. 8547 bool UsedAssumedInformation = false; 8548 const auto &SimplifiedLHS = 8549 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8550 *this, UsedAssumedInformation); 8551 if (!SimplifiedLHS.hasValue()) 8552 return true; 8553 if (!SimplifiedLHS.getValue()) 8554 return false; 8555 LHS = *SimplifiedLHS; 8556 8557 const auto &SimplifiedRHS = 8558 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8559 *this, UsedAssumedInformation); 8560 if (!SimplifiedRHS.hasValue()) 8561 return true; 8562 if (!SimplifiedRHS.getValue()) 8563 return false; 8564 RHS = *SimplifiedRHS; 8565 8566 // TODO: Allow non integers as well. 8567 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8568 return false; 8569 8570 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8571 *this, IRPosition::value(*LHS, getCallBaseContext()), 8572 DepClassTy::REQUIRED); 8573 QuerriedAAs.push_back(&LHSAA); 8574 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8575 *this, IRPosition::value(*RHS, getCallBaseContext()), 8576 DepClassTy::REQUIRED); 8577 QuerriedAAs.push_back(&RHSAA); 8578 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8579 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8580 8581 // If one of them is empty set, we can't decide. 8582 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 8583 return true; 8584 8585 bool MustTrue = false, MustFalse = false; 8586 8587 auto AllowedRegion = 8588 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 8589 8590 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 8591 MustFalse = true; 8592 8593 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange)) 8594 MustTrue = true; 8595 8596 assert((!MustTrue || !MustFalse) && 8597 "Either MustTrue or MustFalse should be false!"); 8598 8599 if (MustTrue) 8600 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 8601 else if (MustFalse) 8602 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 8603 else 8604 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 8605 8606 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 8607 << " " << RHSAA << "\n"); 8608 8609 // TODO: Track a known state too. 8610 return T.isValidState(); 8611 } 8612 8613 /// See AbstractAttribute::updateImpl(...). 
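  /// Rough example of the intended behavior: for %a = add i32 %x, %y with %x
  /// assumed in [0, 4) and %y assumed in [0, 8), visiting the add combines the
  /// operand ranges via ConstantRange::binaryOp and the assumed range of %a
  /// becomes [0, 11).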
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // Simplify the operand first.
        bool UsedAssumedInformation = false;
        const auto &SimplifiedOpV =
            A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
                                   *this, UsedAssumedInformation);
        if (!SimplifiedOpV.hasValue())
          return true;
        if (!SimplifiedOpV.getValue())
          return false;
        Value *VPtr = *SimplifiedOpV;

        // If the value is not an instruction, we query the AA from the
        // Attributor.
        const auto &AA = A.getAAFor<AAValueConstantRange>(
            *this, IRPosition::value(*VPtr, getCallBaseContext()),
            DepClassTy::REQUIRED);

        // The clamp operator is not used here so that we can make use of the
        // program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      // AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
                                                  VisitValueCB, getCtxI(),
                                                  UsedAssumedInformation,
                                                  /* UseValueSimplify */ false))
      return indicatePessimisticFixpoint();

    // Ensure that long def-use chains can't cause circular reasoning either by
    // introducing a cutoff below.
    if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
      return ChangeStatus::UNCHANGED;
    if (++NumChanges > MaxNumChanges) {
      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
                        << " but only " << MaxNumChanges
                        << " are allowed to avoid cyclic reasoning.");
      return indicatePessimisticFixpoint();
    }
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }

  /// Tracker to bail after too many widening steps of the constant range.
  int NumChanges = 0;

  /// Upper bound for the number of allowed changes (=widening steps) for the
  /// constant range before we give up.
8707 static constexpr int MaxNumChanges = 5; 8708 }; 8709 8710 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 8711 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 8712 : AAValueConstantRangeImpl(IRP, A) {} 8713 8714 /// See AbstractAttribute::initialize(...). 8715 ChangeStatus updateImpl(Attributor &A) override { 8716 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 8717 "not be called"); 8718 } 8719 8720 /// See AbstractAttribute::trackStatistics() 8721 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 8722 }; 8723 8724 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 8725 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 8726 : AAValueConstantRangeFunction(IRP, A) {} 8727 8728 /// See AbstractAttribute::trackStatistics() 8729 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 8730 }; 8731 8732 struct AAValueConstantRangeCallSiteReturned 8733 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8734 AAValueConstantRangeImpl, 8735 AAValueConstantRangeImpl::StateType, 8736 /* IntroduceCallBaseContext */ true> { 8737 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 8738 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8739 AAValueConstantRangeImpl, 8740 AAValueConstantRangeImpl::StateType, 8741 /* IntroduceCallBaseContext */ true>(IRP, 8742 A) { 8743 } 8744 8745 /// See AbstractAttribute::initialize(...). 8746 void initialize(Attributor &A) override { 8747 // If it is a load instruction with range metadata, use the metadata. 8748 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 8749 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 8750 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8751 8752 AAValueConstantRangeImpl::initialize(A); 8753 } 8754 8755 /// See AbstractAttribute::trackStatistics() 8756 void trackStatistics() const override { 8757 STATS_DECLTRACK_CSRET_ATTR(value_range) 8758 } 8759 }; 8760 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 8761 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 8762 : AAValueConstantRangeFloating(IRP, A) {} 8763 8764 /// See AbstractAttribute::manifest() 8765 ChangeStatus manifest(Attributor &A) override { 8766 return ChangeStatus::UNCHANGED; 8767 } 8768 8769 /// See AbstractAttribute::trackStatistics() 8770 void trackStatistics() const override { 8771 STATS_DECLTRACK_CSARG_ATTR(value_range) 8772 } 8773 }; 8774 } // namespace 8775 8776 /// ------------------ Potential Values Attribute ------------------------- 8777 8778 namespace { 8779 struct AAPotentialValuesImpl : AAPotentialValues { 8780 using StateType = PotentialConstantIntValuesState; 8781 8782 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A) 8783 : AAPotentialValues(IRP, A) {} 8784 8785 /// See AbstractAttribute::initialize(..). 8786 void initialize(Attributor &A) override { 8787 if (A.hasSimplificationCallback(getIRPosition())) 8788 indicatePessimisticFixpoint(); 8789 else 8790 AAPotentialValues::initialize(A); 8791 } 8792 8793 /// See AbstractAttribute::getAsStr(). 8794 const std::string getAsStr() const override { 8795 std::string Str; 8796 llvm::raw_string_ostream OS(Str); 8797 OS << getState(); 8798 return OS.str(); 8799 } 8800 8801 /// See AbstractAttribute::updateImpl(...). 
8802 ChangeStatus updateImpl(Attributor &A) override { 8803 return indicatePessimisticFixpoint(); 8804 } 8805 }; 8806 8807 struct AAPotentialValuesArgument final 8808 : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8809 PotentialConstantIntValuesState> { 8810 using Base = 8811 AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8812 PotentialConstantIntValuesState>; 8813 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A) 8814 : Base(IRP, A) {} 8815 8816 /// See AbstractAttribute::initialize(..). 8817 void initialize(Attributor &A) override { 8818 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8819 indicatePessimisticFixpoint(); 8820 } else { 8821 Base::initialize(A); 8822 } 8823 } 8824 8825 /// See AbstractAttribute::trackStatistics() 8826 void trackStatistics() const override { 8827 STATS_DECLTRACK_ARG_ATTR(potential_values) 8828 } 8829 }; 8830 8831 struct AAPotentialValuesReturned 8832 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { 8833 using Base = 8834 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; 8835 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) 8836 : Base(IRP, A) {} 8837 8838 /// See AbstractAttribute::trackStatistics() 8839 void trackStatistics() const override { 8840 STATS_DECLTRACK_FNRET_ATTR(potential_values) 8841 } 8842 }; 8843 8844 struct AAPotentialValuesFloating : AAPotentialValuesImpl { 8845 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) 8846 : AAPotentialValuesImpl(IRP, A) {} 8847 8848 /// See AbstractAttribute::initialize(..). 8849 void initialize(Attributor &A) override { 8850 AAPotentialValuesImpl::initialize(A); 8851 if (isAtFixpoint()) 8852 return; 8853 8854 Value &V = getAssociatedValue(); 8855 8856 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8857 unionAssumed(C->getValue()); 8858 indicateOptimisticFixpoint(); 8859 return; 8860 } 8861 8862 if (isa<UndefValue>(&V)) { 8863 unionAssumedWithUndef(); 8864 indicateOptimisticFixpoint(); 8865 return; 8866 } 8867 8868 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) 8869 return; 8870 8871 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V)) 8872 return; 8873 8874 indicatePessimisticFixpoint(); 8875 8876 LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: " 8877 << getAssociatedValue() << "\n"); 8878 } 8879 8880 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, 8881 const APInt &RHS) { 8882 return ICmpInst::compare(LHS, RHS, ICI->getPredicate()); 8883 } 8884 8885 static APInt calculateCastInst(const CastInst *CI, const APInt &Src, 8886 uint32_t ResultBitWidth) { 8887 Instruction::CastOps CastOp = CI->getOpcode(); 8888 switch (CastOp) { 8889 default: 8890 llvm_unreachable("unsupported or not integer cast"); 8891 case Instruction::Trunc: 8892 return Src.trunc(ResultBitWidth); 8893 case Instruction::SExt: 8894 return Src.sext(ResultBitWidth); 8895 case Instruction::ZExt: 8896 return Src.zext(ResultBitWidth); 8897 case Instruction::BitCast: 8898 return Src; 8899 } 8900 } 8901 8902 static APInt calculateBinaryOperator(const BinaryOperator *BinOp, 8903 const APInt &LHS, const APInt &RHS, 8904 bool &SkipOperation, bool &Unsupported) { 8905 Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); 8906 // Unsupported is set to true when the binary operator is not supported. 8907 // SkipOperation is set to true when UB occur with the given operand pair 8908 // (LHS, RHS). 
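    // For example, a udiv with RHS == 0 sets SkipOperation and the caller then
    // skips that (LHS, RHS) pair instead of recording a result.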
8909 // TODO: we should look at nsw and nuw keywords to handle operations 8910 // that create poison or undef value. 8911 switch (BinOpcode) { 8912 default: 8913 Unsupported = true; 8914 return LHS; 8915 case Instruction::Add: 8916 return LHS + RHS; 8917 case Instruction::Sub: 8918 return LHS - RHS; 8919 case Instruction::Mul: 8920 return LHS * RHS; 8921 case Instruction::UDiv: 8922 if (RHS.isZero()) { 8923 SkipOperation = true; 8924 return LHS; 8925 } 8926 return LHS.udiv(RHS); 8927 case Instruction::SDiv: 8928 if (RHS.isZero()) { 8929 SkipOperation = true; 8930 return LHS; 8931 } 8932 return LHS.sdiv(RHS); 8933 case Instruction::URem: 8934 if (RHS.isZero()) { 8935 SkipOperation = true; 8936 return LHS; 8937 } 8938 return LHS.urem(RHS); 8939 case Instruction::SRem: 8940 if (RHS.isZero()) { 8941 SkipOperation = true; 8942 return LHS; 8943 } 8944 return LHS.srem(RHS); 8945 case Instruction::Shl: 8946 return LHS.shl(RHS); 8947 case Instruction::LShr: 8948 return LHS.lshr(RHS); 8949 case Instruction::AShr: 8950 return LHS.ashr(RHS); 8951 case Instruction::And: 8952 return LHS & RHS; 8953 case Instruction::Or: 8954 return LHS | RHS; 8955 case Instruction::Xor: 8956 return LHS ^ RHS; 8957 } 8958 } 8959 8960 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8961 const APInt &LHS, const APInt &RHS) { 8962 bool SkipOperation = false; 8963 bool Unsupported = false; 8964 APInt Result = 8965 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8966 if (Unsupported) 8967 return false; 8968 // If SkipOperation is true, we can ignore this operand pair (L, R). 8969 if (!SkipOperation) 8970 unionAssumed(Result); 8971 return isValidState(); 8972 } 8973 8974 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8975 auto AssumedBefore = getAssumed(); 8976 Value *LHS = ICI->getOperand(0); 8977 Value *RHS = ICI->getOperand(1); 8978 8979 // Simplify the operands first. 8980 bool UsedAssumedInformation = false; 8981 const auto &SimplifiedLHS = 8982 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8983 *this, UsedAssumedInformation); 8984 if (!SimplifiedLHS.hasValue()) 8985 return ChangeStatus::UNCHANGED; 8986 if (!SimplifiedLHS.getValue()) 8987 return indicatePessimisticFixpoint(); 8988 LHS = *SimplifiedLHS; 8989 8990 const auto &SimplifiedRHS = 8991 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8992 *this, UsedAssumedInformation); 8993 if (!SimplifiedRHS.hasValue()) 8994 return ChangeStatus::UNCHANGED; 8995 if (!SimplifiedRHS.getValue()) 8996 return indicatePessimisticFixpoint(); 8997 RHS = *SimplifiedRHS; 8998 8999 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9000 return indicatePessimisticFixpoint(); 9001 9002 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9003 DepClassTy::REQUIRED); 9004 if (!LHSAA.isValidState()) 9005 return indicatePessimisticFixpoint(); 9006 9007 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9008 DepClassTy::REQUIRED); 9009 if (!RHSAA.isValidState()) 9010 return indicatePessimisticFixpoint(); 9011 9012 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9013 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9014 9015 // TODO: make use of undef flag to limit potential values aggressively. 
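    // Worked example: for %c = icmp eq i32 %x, 3 where %x has potential values
    // {1, 2}, every pairwise comparison below is false, so the potential value
    // set of %c becomes {0}. If both a true and a false outcome are possible
    // we give up instead of tracking {0, 1}.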
9016 bool MaybeTrue = false, MaybeFalse = false; 9017 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 9018 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9019 // The result of any comparison between undefs can be soundly replaced 9020 // with undef. 9021 unionAssumedWithUndef(); 9022 } else if (LHSAA.undefIsContained()) { 9023 for (const APInt &R : RHSAAPVS) { 9024 bool CmpResult = calculateICmpInst(ICI, Zero, R); 9025 MaybeTrue |= CmpResult; 9026 MaybeFalse |= !CmpResult; 9027 if (MaybeTrue & MaybeFalse) 9028 return indicatePessimisticFixpoint(); 9029 } 9030 } else if (RHSAA.undefIsContained()) { 9031 for (const APInt &L : LHSAAPVS) { 9032 bool CmpResult = calculateICmpInst(ICI, L, Zero); 9033 MaybeTrue |= CmpResult; 9034 MaybeFalse |= !CmpResult; 9035 if (MaybeTrue & MaybeFalse) 9036 return indicatePessimisticFixpoint(); 9037 } 9038 } else { 9039 for (const APInt &L : LHSAAPVS) { 9040 for (const APInt &R : RHSAAPVS) { 9041 bool CmpResult = calculateICmpInst(ICI, L, R); 9042 MaybeTrue |= CmpResult; 9043 MaybeFalse |= !CmpResult; 9044 if (MaybeTrue & MaybeFalse) 9045 return indicatePessimisticFixpoint(); 9046 } 9047 } 9048 } 9049 if (MaybeTrue) 9050 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 9051 if (MaybeFalse) 9052 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 9053 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9054 : ChangeStatus::CHANGED; 9055 } 9056 9057 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 9058 auto AssumedBefore = getAssumed(); 9059 Value *LHS = SI->getTrueValue(); 9060 Value *RHS = SI->getFalseValue(); 9061 9062 // Simplify the operands first. 9063 bool UsedAssumedInformation = false; 9064 const auto &SimplifiedLHS = 9065 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9066 *this, UsedAssumedInformation); 9067 if (!SimplifiedLHS.hasValue()) 9068 return ChangeStatus::UNCHANGED; 9069 if (!SimplifiedLHS.getValue()) 9070 return indicatePessimisticFixpoint(); 9071 LHS = *SimplifiedLHS; 9072 9073 const auto &SimplifiedRHS = 9074 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9075 *this, UsedAssumedInformation); 9076 if (!SimplifiedRHS.hasValue()) 9077 return ChangeStatus::UNCHANGED; 9078 if (!SimplifiedRHS.getValue()) 9079 return indicatePessimisticFixpoint(); 9080 RHS = *SimplifiedRHS; 9081 9082 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9083 return indicatePessimisticFixpoint(); 9084 9085 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 9086 UsedAssumedInformation); 9087 9088 // Check if we only need one operand. 9089 bool OnlyLeft = false, OnlyRight = false; 9090 if (C.hasValue() && *C && (*C)->isOneValue()) 9091 OnlyLeft = true; 9092 else if (C.hasValue() && *C && (*C)->isZeroValue()) 9093 OnlyRight = true; 9094 9095 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 9096 if (!OnlyRight) { 9097 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9098 DepClassTy::REQUIRED); 9099 if (!LHSAA->isValidState()) 9100 return indicatePessimisticFixpoint(); 9101 } 9102 if (!OnlyLeft) { 9103 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9104 DepClassTy::REQUIRED); 9105 if (!RHSAA->isValidState()) 9106 return indicatePessimisticFixpoint(); 9107 } 9108 9109 if (!LHSAA || !RHSAA) { 9110 // select (true/false), lhs, rhs 9111 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 9112 9113 if (OpAA->undefIsContained()) 9114 unionAssumedWithUndef(); 9115 else 9116 unionAssumed(*OpAA); 9117 9118 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 9119 // select i1 *, undef , undef => undef 9120 unionAssumedWithUndef(); 9121 } else { 9122 unionAssumed(*LHSAA); 9123 unionAssumed(*RHSAA); 9124 } 9125 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9126 : ChangeStatus::CHANGED; 9127 } 9128 9129 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 9130 auto AssumedBefore = getAssumed(); 9131 if (!CI->isIntegerCast()) 9132 return indicatePessimisticFixpoint(); 9133 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 9134 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 9135 Value *Src = CI->getOperand(0); 9136 9137 // Simplify the operand first. 9138 bool UsedAssumedInformation = false; 9139 const auto &SimplifiedSrc = 9140 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 9141 *this, UsedAssumedInformation); 9142 if (!SimplifiedSrc.hasValue()) 9143 return ChangeStatus::UNCHANGED; 9144 if (!SimplifiedSrc.getValue()) 9145 return indicatePessimisticFixpoint(); 9146 Src = *SimplifiedSrc; 9147 9148 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 9149 DepClassTy::REQUIRED); 9150 if (!SrcAA.isValidState()) 9151 return indicatePessimisticFixpoint(); 9152 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 9153 if (SrcAA.undefIsContained()) 9154 unionAssumedWithUndef(); 9155 else { 9156 for (const APInt &S : SrcAAPVS) { 9157 APInt T = calculateCastInst(CI, S, ResultBitWidth); 9158 unionAssumed(T); 9159 } 9160 } 9161 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9162 : ChangeStatus::CHANGED; 9163 } 9164 9165 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 9166 auto AssumedBefore = getAssumed(); 9167 Value *LHS = BinOp->getOperand(0); 9168 Value *RHS = BinOp->getOperand(1); 9169 9170 // Simplify the operands first. 9171 bool UsedAssumedInformation = false; 9172 const auto &SimplifiedLHS = 9173 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9174 *this, UsedAssumedInformation); 9175 if (!SimplifiedLHS.hasValue()) 9176 return ChangeStatus::UNCHANGED; 9177 if (!SimplifiedLHS.getValue()) 9178 return indicatePessimisticFixpoint(); 9179 LHS = *SimplifiedLHS; 9180 9181 const auto &SimplifiedRHS = 9182 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9183 *this, UsedAssumedInformation); 9184 if (!SimplifiedRHS.hasValue()) 9185 return ChangeStatus::UNCHANGED; 9186 if (!SimplifiedRHS.getValue()) 9187 return indicatePessimisticFixpoint(); 9188 RHS = *SimplifiedRHS; 9189 9190 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9191 return indicatePessimisticFixpoint(); 9192 9193 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9194 DepClassTy::REQUIRED); 9195 if (!LHSAA.isValidState()) 9196 return indicatePessimisticFixpoint(); 9197 9198 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9199 DepClassTy::REQUIRED); 9200 if (!RHSAA.isValidState()) 9201 return indicatePessimisticFixpoint(); 9202 9203 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9204 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9205 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9206 9207 // TODO: make use of undef flag to limit potential values aggressively. 
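    // Note: an operand that may be undef is substituted with the value 0
    // below; e.g., add i32 undef, %y with %y in {3, 5} yields the potential
    // values {3, 5}.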
9208 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9209 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9210 return indicatePessimisticFixpoint(); 9211 } else if (LHSAA.undefIsContained()) { 9212 for (const APInt &R : RHSAAPVS) { 9213 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9214 return indicatePessimisticFixpoint(); 9215 } 9216 } else if (RHSAA.undefIsContained()) { 9217 for (const APInt &L : LHSAAPVS) { 9218 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9219 return indicatePessimisticFixpoint(); 9220 } 9221 } else { 9222 for (const APInt &L : LHSAAPVS) { 9223 for (const APInt &R : RHSAAPVS) { 9224 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9225 return indicatePessimisticFixpoint(); 9226 } 9227 } 9228 } 9229 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9230 : ChangeStatus::CHANGED; 9231 } 9232 9233 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9234 auto AssumedBefore = getAssumed(); 9235 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9236 Value *IncomingValue = PHI->getIncomingValue(u); 9237 9238 // Simplify the operand first. 9239 bool UsedAssumedInformation = false; 9240 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9241 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9242 UsedAssumedInformation); 9243 if (!SimplifiedIncomingValue.hasValue()) 9244 continue; 9245 if (!SimplifiedIncomingValue.getValue()) 9246 return indicatePessimisticFixpoint(); 9247 IncomingValue = *SimplifiedIncomingValue; 9248 9249 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9250 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9251 if (!PotentialValuesAA.isValidState()) 9252 return indicatePessimisticFixpoint(); 9253 if (PotentialValuesAA.undefIsContained()) 9254 unionAssumedWithUndef(); 9255 else 9256 unionAssumed(PotentialValuesAA.getAssumed()); 9257 } 9258 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9259 : ChangeStatus::CHANGED; 9260 } 9261 9262 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9263 if (!L.getType()->isIntegerTy()) 9264 return indicatePessimisticFixpoint(); 9265 9266 auto Union = [&](Value &V) { 9267 if (isa<UndefValue>(V)) { 9268 unionAssumedWithUndef(); 9269 return true; 9270 } 9271 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9272 unionAssumed(CI->getValue()); 9273 return true; 9274 } 9275 return false; 9276 }; 9277 auto AssumedBefore = getAssumed(); 9278 9279 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9280 return indicatePessimisticFixpoint(); 9281 9282 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9283 : ChangeStatus::CHANGED; 9284 } 9285 9286 /// See AbstractAttribute::updateImpl(...). 
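  /// The associated value is handled based on its defining instruction: icmp,
  /// select, cast, binary operator, phi, and load each have a dedicated
  /// updateWith* helper above; any other instruction leads to a pessimistic
  /// fixpoint.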
9287 ChangeStatus updateImpl(Attributor &A) override { 9288 Value &V = getAssociatedValue(); 9289 Instruction *I = dyn_cast<Instruction>(&V); 9290 9291 if (auto *ICI = dyn_cast<ICmpInst>(I)) 9292 return updateWithICmpInst(A, ICI); 9293 9294 if (auto *SI = dyn_cast<SelectInst>(I)) 9295 return updateWithSelectInst(A, SI); 9296 9297 if (auto *CI = dyn_cast<CastInst>(I)) 9298 return updateWithCastInst(A, CI); 9299 9300 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 9301 return updateWithBinaryOperator(A, BinOp); 9302 9303 if (auto *PHI = dyn_cast<PHINode>(I)) 9304 return updateWithPHINode(A, PHI); 9305 9306 if (auto *L = dyn_cast<LoadInst>(I)) 9307 return updateWithLoad(A, *L); 9308 9309 return indicatePessimisticFixpoint(); 9310 } 9311 9312 /// See AbstractAttribute::trackStatistics() 9313 void trackStatistics() const override { 9314 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 9315 } 9316 }; 9317 9318 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 9319 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 9320 : AAPotentialValuesImpl(IRP, A) {} 9321 9322 /// See AbstractAttribute::initialize(...). 9323 ChangeStatus updateImpl(Attributor &A) override { 9324 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 9325 "not be called"); 9326 } 9327 9328 /// See AbstractAttribute::trackStatistics() 9329 void trackStatistics() const override { 9330 STATS_DECLTRACK_FN_ATTR(potential_values) 9331 } 9332 }; 9333 9334 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 9335 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 9336 : AAPotentialValuesFunction(IRP, A) {} 9337 9338 /// See AbstractAttribute::trackStatistics() 9339 void trackStatistics() const override { 9340 STATS_DECLTRACK_CS_ATTR(potential_values) 9341 } 9342 }; 9343 9344 struct AAPotentialValuesCallSiteReturned 9345 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> { 9346 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 9347 : AACallSiteReturnedFromReturned<AAPotentialValues, 9348 AAPotentialValuesImpl>(IRP, A) {} 9349 9350 /// See AbstractAttribute::trackStatistics() 9351 void trackStatistics() const override { 9352 STATS_DECLTRACK_CSRET_ATTR(potential_values) 9353 } 9354 }; 9355 9356 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { 9357 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) 9358 : AAPotentialValuesFloating(IRP, A) {} 9359 9360 /// See AbstractAttribute::initialize(..). 9361 void initialize(Attributor &A) override { 9362 AAPotentialValuesImpl::initialize(A); 9363 if (isAtFixpoint()) 9364 return; 9365 9366 Value &V = getAssociatedValue(); 9367 9368 if (auto *C = dyn_cast<ConstantInt>(&V)) { 9369 unionAssumed(C->getValue()); 9370 indicateOptimisticFixpoint(); 9371 return; 9372 } 9373 9374 if (isa<UndefValue>(&V)) { 9375 unionAssumedWithUndef(); 9376 indicateOptimisticFixpoint(); 9377 return; 9378 } 9379 } 9380 9381 /// See AbstractAttribute::updateImpl(...). 9382 ChangeStatus updateImpl(Attributor &A) override { 9383 Value &V = getAssociatedValue(); 9384 auto AssumedBefore = getAssumed(); 9385 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V), 9386 DepClassTy::REQUIRED); 9387 const auto &S = AA.getAssumed(); 9388 unionAssumed(S); 9389 return AssumedBefore == getAssumed() ? 
ChangeStatus::UNCHANGED 9390 : ChangeStatus::CHANGED; 9391 } 9392 9393 /// See AbstractAttribute::trackStatistics() 9394 void trackStatistics() const override { 9395 STATS_DECLTRACK_CSARG_ATTR(potential_values) 9396 } 9397 }; 9398 9399 /// ------------------------ NoUndef Attribute --------------------------------- 9400 struct AANoUndefImpl : AANoUndef { 9401 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} 9402 9403 /// See AbstractAttribute::initialize(...). 9404 void initialize(Attributor &A) override { 9405 if (getIRPosition().hasAttr({Attribute::NoUndef})) { 9406 indicateOptimisticFixpoint(); 9407 return; 9408 } 9409 Value &V = getAssociatedValue(); 9410 if (isa<UndefValue>(V)) 9411 indicatePessimisticFixpoint(); 9412 else if (isa<FreezeInst>(V)) 9413 indicateOptimisticFixpoint(); 9414 else if (getPositionKind() != IRPosition::IRP_RETURNED && 9415 isGuaranteedNotToBeUndefOrPoison(&V)) 9416 indicateOptimisticFixpoint(); 9417 else 9418 AANoUndef::initialize(A); 9419 } 9420 9421 /// See followUsesInMBEC 9422 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 9423 AANoUndef::StateType &State) { 9424 const Value *UseV = U->get(); 9425 const DominatorTree *DT = nullptr; 9426 AssumptionCache *AC = nullptr; 9427 InformationCache &InfoCache = A.getInfoCache(); 9428 if (Function *F = getAnchorScope()) { 9429 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 9430 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 9431 } 9432 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); 9433 bool TrackUse = false; 9434 // Track use for instructions which must produce undef or poison bits when 9435 // at least one operand contains such bits. 9436 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) 9437 TrackUse = true; 9438 return TrackUse; 9439 } 9440 9441 /// See AbstractAttribute::getAsStr(). 9442 const std::string getAsStr() const override { 9443 return getAssumed() ? "noundef" : "may-undef-or-poison"; 9444 } 9445 9446 ChangeStatus manifest(Attributor &A) override { 9447 // We don't manifest noundef attribute for dead positions because the 9448 // associated values with dead positions would be replaced with undef 9449 // values. 9450 bool UsedAssumedInformation = false; 9451 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, 9452 UsedAssumedInformation)) 9453 return ChangeStatus::UNCHANGED; 9454 // A position whose simplified value does not have any value is 9455 // considered to be dead. We don't manifest noundef in such positions for 9456 // the same reason above. 9457 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation) 9458 .hasValue()) 9459 return ChangeStatus::UNCHANGED; 9460 return AANoUndef::manifest(A); 9461 } 9462 }; 9463 9464 struct AANoUndefFloating : public AANoUndefImpl { 9465 AANoUndefFloating(const IRPosition &IRP, Attributor &A) 9466 : AANoUndefImpl(IRP, A) {} 9467 9468 /// See AbstractAttribute::initialize(...). 9469 void initialize(Attributor &A) override { 9470 AANoUndefImpl::initialize(A); 9471 if (!getState().isAtFixpoint()) 9472 if (Instruction *CtxI = getCtxI()) 9473 followUsesInMBEC(*this, A, getState(), *CtxI); 9474 } 9475 9476 /// See AbstractAttribute::updateImpl(...). 
struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
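
// When the deduction above succeeds for an argument or a returned position,
// it manifests as the corresponding IR attribute, e.g. (illustrative only):
//   define noundef i32 @f(i32 noundef %x)
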
struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  virtual const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }

  virtual bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      bool UsedAssumedInformation = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       UsedAssumedInformation, false)) {
        // If we haven't gone through all values, assume that there are
        // unknown callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = cast<CallBase>(getCtxI());

    if (CB->isInlineAsm()) {
      if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
          !hasAssumption(*CB, "ompx_no_call_asm"))
        setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: just process the called operand.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};
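
// For illustration (assumed IR, not from a test): an indirect call that
// carries callee metadata, e.g.
//   call void %fp(), !callees !0
//   !0 = !{void ()* @f, void ()* @g}
// contributes the optimistic edges {@f, @g} above instead of an unknown
// callee.
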
struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(const Function &Fn) {
      Reachable.insert(&Fn);
      Unreachable.erase(&Fn);
    }

    /// If there is no information about the function, None is returned.
    Optional<bool> isCachedReachable(const Function &Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(&Fn))
        return true;

      if (Unreachable.count(&Fn))
        return false;

      return llvm::None;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<const Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<const Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we
    /// assume that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

  struct QueryResolver : public QuerySet {
    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (const Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(*Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList,
                     const Function &Fn) {
      Optional<bool> Cached = isCachedReachable(Fn);
      if (Cached.hasValue())
        return Cached.getValue();

      // The query was not cached, thus it is new. We need to request an
      // update explicitly to make sure the information is properly run to a
      // fixpoint.
      A.registerForUpdate(AA);

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(&Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }
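
    // Note on the guard above: Fn is inserted into Unreachable *before*
    // checkIfReachable runs. For a recursive function (assumed example: @f
    // whose only call edge leads back to @f) the nested reachability query
    // then hits this cache entry instead of recursing forever; the entry is
    // later upgraded via markReachable if reachability is actually proven.
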
    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          const Function &Fn) const {

      // Handle the most trivial case first.
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(const_cast<Function *>(&Fn)))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // Functions that do not call back into the module can be ignored.
          if (Edge->hasFnAttribute(Attribute::NoCallback))
            continue;

          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (auto *Dep : Deps)
        A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);

      return false;
    }
  };

  /// Get call edges that can be reached by this instruction.
  bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
                             const Instruction &Inst,
                             SmallVector<const AACallEdges *> &Result) const {
    // Determine the call-like instructions that we can reach from \p Inst.
    auto CheckCallBase = [&](Instruction &CBInst) {
      if (!Reachability.isAssumedReachable(A, Inst, CBInst))
        return true;

      auto &CB = cast<CallBase>(CBInst);
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

      Result.push_back(&AAEdges);
      return true;
    };

    bool UsedAssumedInformation = false;
    return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                             UsedAssumedInformation,
                                             /* CheckBBLivenessOnly */ true);
  }

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
                                                          {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB,
                const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);

    return Result;
  }

  bool instructionCanReach(Attributor &A, const Instruction &Inst,
                           const Function &Fn,
                           bool UseBackwards) const override {
    if (!isValidState())
      return true;

    if (UseBackwards)
      return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);

    const auto &Reachability = A.getAAFor<AAReachability>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);

    SmallVector<const AACallEdges *> CallEdges;
    bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
    if (!AllKnown)
      InstQSet.CanReachUnknownCallee = true;

    return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    // Update the instruction queries.
    if (!InstQueries.empty()) {
      const AAReachability *Reachability = &A.getAAFor<AAReachability>(
          *this, IRPosition::function(*getAssociatedFunction()),
          DepClassTy::REQUIRED);

      // Check for local call bases first.
      for (auto &InstPair : InstQueries) {
        SmallVector<const AACallEdges *> CallEdges;
        bool AllKnown = getReachableCallEdges(A, *Reachability,
                                              *InstPair.first, CallEdges);
        // The update will report a change if this affects any queries.
        if (!AllKnown)
          InstPair.second.CanReachUnknownCallee = true;
        Change |= InstPair.second.update(A, *this, CallEdges);
      }
    }

    return Change;
  }
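
  // Usage sketch (hypothetical caller, for illustration): another abstract
  // attribute may ask
  //   FnReachabilityAA.canReach(A, CB, *TargetFn);
  // The answer is cached in CBQueries[&CB], and the loop in updateImpl above
  // re-runs all cached resolvers each iteration so the cached answers reach a
  // fixpoint together with the underlying call-edge information.
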
  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QueryResolver WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  MapVector<const CallBase *, QueryResolver> CBQueries;

  /// This is for instruction queries that scan "forward".
  MapVector<const Instruction *, QueryResolver> InstQueries;
};
} // namespace

/// ---------------------- Assumption Propagation ------------------------------
namespace {
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};
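
// Background note (an assumption based on llvm/IR/Assumptions.h, not stated
// in this file): assumptions are plain strings attached to functions and call
// sites via the "llvm.assume" string attribute, e.g.
//   attributes #0 = { "llvm.assume"="ompx_no_call_asm,omp_no_openmp" }
// hasAssumption() above simply tests membership in the deduced set.
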
/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool UsedAssumedInformation = false;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either
    // an entry into the call graph or an empty node. Such a node is known to
    // contain only its own assumptions, which can be propagated to its
    // successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};
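
// Worked example (hypothetical): if one caller carries the assumptions
// {"A", "B"} and another caller carries {"B", "C"}, the intersection taken in
// updateImpl above narrows this function's assumed set to {"B"}. The
// call-site variant below intersects with its caller's state analogously.
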
/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};
} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;
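
// Note (an assumption about the surrounding framework, see Attributor.h): the
// address of each ID member above is what uniquely identifies an abstract
// attribute kind; lookups such as Attributor::getOrCreateAAFor<AAType> key on
// &AAType::ID, which is why every attribute class must define one.
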
// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
  case IRPosition::PK: \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
  case IRPosition::PK: \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \
    ++NumAAs; \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
    CLASS *AA = nullptr; \
    switch (IRP.getPositionKind()) { \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
    } \
    return *AA; \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
    CLASS *AA = nullptr; \
    switch (IRP.getPositionKind()) { \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
    } \
    return *AA; \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
    CLASS *AA = nullptr; \
    switch (IRP.getPositionKind()) { \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
    } \
    return *AA; \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
    CLASS *AA = nullptr; \
    switch (IRP.getPositionKind()) { \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
    } \
    return *AA; \
  }
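
// For illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// above expands (roughly) to:
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     // invalid/floating/argument/returned/call-site-* positions:
//     //   llvm_unreachable("Cannot create AANoUnwind for a ... position!");
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }
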
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
    CLASS *AA = nullptr; \
    switch (IRP.getPositionKind()) { \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
    } \
    return *AA; \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV