//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SetVector.h"
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro STATS_DECLTRACK
// with a custom message. If there are multiple increment sites, STATS_DECL
// and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
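// E.g., an abstract attribute derives from both AbstractAttribute and
// IRPosition, each of which provides a stream operator, so 'OS << AA' would
// otherwise be ambiguous.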
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to
/// build getelementptr instructions that traverse the natural type of \p Ptr
/// if possible. If that fails, the remaining offset is adjusted byte-wise,
/// hence through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
/// If \p Intraprocedural is set to true, only values valid in the scope of
/// \p CtxI will be visited and simplification into other scopes is prevented.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool &UsedAssumedInformation,
    bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr,
    bool Intraprocedural = false) {

  struct LivenessInfo {
    const AAIsDead *LivenessAA = nullptr;
    bool AnyDead = false;
  };
  SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
  auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
    LivenessInfo &LI = LivenessAAs[&F];
    if (!LI.LivenessAA)
      LI.LivenessAA = &A.getAAFor<AAIsDead>(QueryingAA,
                                            IRPosition::function(F),
                                            DepClassTy::NONE);
    return LI;
  };

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues) {
      LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
                        << Iteration << "!\n");
      return false;
    }

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
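    // E.g., for
    //   %r = call i8* @passthrough(i8* returned %p)
    // the "returned" attribute guarantees the callee returns %p, so we can
    // continue the traversal with %p instead of %r.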
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      LivenessInfo &LI = GetLivenessInfo(*PHI->getFunction());
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI->getParent())) {
          LI.AnyDead = true;
          UsedAssumedInformation |= !LI.LivenessAA->isAtFixpoint();
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (auto *Arg = dyn_cast<Argument>(V)) {
      if (!Intraprocedural && !Arg->hasPassPointeeByValueCopyAttr()) {
        SmallVector<Item> CallSiteValues;
        bool UsedAssumedInformation = false;
        if (A.checkForAllCallSites(
                [&](AbstractCallSite ACS) {
                  // Callbacks might not have a corresponding call site
                  // operand; stick with the argument in that case.
                  Value *CSOp = ACS.getCallArgOperand(*Arg);
                  if (!CSOp)
                    return false;
                  CallSiteValues.push_back({CSOp, ACS.getInstruction()});
                  return true;
                },
                *Arg->getParent(), true, &QueryingAA, UsedAssumedInformation)) {
          Worklist.append(CallSiteValues);
          continue;
        }
      }
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      Value *NewV = SimpleV.getValue();
      if (NewV && NewV != V) {
        if (!Intraprocedural || !CtxI ||
            AA::isValidInScope(*NewV, CtxI->getFunction())) {
          Worklist.push_back({NewV, CtxI});
          continue;
        }
      }
    }

    if (auto *LI = dyn_cast<LoadInst>(V)) {
      bool UsedAssumedInformation = false;
      // If we ask for the potentially loaded values from the initial pointer
      // we will simply end up here again. The load is as far as we can make
      // it.
      if (LI->getPointerOperand() != InitialV) {
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies, QueryingAA,
                                           UsedAssumedInformation,
                                           /* OnlyExact */ true)) {
          // Values have to be dynamically unique or we lose the fact that a
          // single llvm::Value might represent two runtime values (e.g.,
          // stack locations in different recursive calls).
          bool DynamicallyUnique =
              llvm::all_of(PotentialCopies, [&A, &QueryingAA](Value *PC) {
                return AA::isDynamicallyUnique(A, QueryingAA, *PC);
              });
          if (DynamicallyUnique &&
              (!Intraprocedural || !CtxI ||
               llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
                 return AA::isValidInScope(*PC, CtxI->getFunction());
               }))) {
            for (auto *PotentialCopy : PotentialCopies)
              Worklist.push_back({PotentialCopy, CtxI});
            continue;
          }
        }
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1)) {
      LLVM_DEBUG(dbgs() << "Generic value traversal visit callback failed for: "
                        << *V << "!\n");
      return false;
    }
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  for (auto &It : LivenessAAs)
    if (It.second.AnyDead)
      A.recordDependence(*It.second.LivenessAA, QueryingAA,
                         DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     bool Intraprocedural) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          UsedAssumedInformation, true, 32, StripCB, Intraprocedural))
    return false;
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We use the signed minimum or maximum of the range, depending on
    // whether the caller asked for a lower or an upper bound on the offset.
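    // E.g., for a value known to be in [4, 16], a caller that needs a lower
    // bound on the offset (GetMinOffset) receives 4, while a caller that
    // needs an upper bound receives 16.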
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument
/// position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
///   U     - Underlying use.
///   I     - The user of the \p U.
///   State - The state to update.
///   Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  //     void f(int a, int b, int *ptr) {
  //       if (a)
  //         if (b) {
  //           *ptr = 0;
  //         } else {
  //           *ptr = 1;
  //         }
  //       else {
  //         if (b) {
  //           *ptr = 0;
  //         } else {
  //           *ptr = 1;
  //         }
  //       }
  //     }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
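    // An OffsetAndSize of (-1, -1) serves as the "not found" sentinel; it is
    // overwritten once a bin containing an access for \p I is found.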
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or all
    // instructions are executed by the initial thread only, we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load; for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
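    // Note that dominance is only meaningful within a single runtime
    // invocation of the function: in a recursive function an access that
    // dominates \p I in the CFG can still execute after \p I at runtime, in a
    // different recursive instance. The dominator tree is therefore only
    // consulted for (known) norecurse functions below.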
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is, it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable
    // query to determine if we should look at reachability from the callee.
    // For certain pointers we know the lifetime and we do not have to step
    // into the callee to determine reachability as the pointer would be dead
    // in the callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the function containing the alloca is not recursive, the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning, which does
      // not work yet if we have threading effects or the access is
      // complicated.
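      // Two accesses cannot interfere if control flow cannot get from one to
      // the other: a write is only relevant if it can reach \p I, and a read
      // is only relevant if \p I can reach it.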
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if
    // that succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses *Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : *It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
                                AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr
        // is already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
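        // E.g., for
        //   %g = getelementptr { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1
        // the offset of %g is the offset of %p plus 4 bytes (assuming a
        // typical data layout in which i32 occupies 4 bytes).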
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(GEP->getSourceElementType(),
                                                 Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the
      // value might change while we iterate through a loop. For now, we give
      // up if the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr
        // is already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
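        // E.g., for
        //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
        //   %p.next = getelementptr i8, i8* %p, i64 1
        // the offset of %p relative to %base changes with every iteration,
        // so we fall back to an unknown offset.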
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond()->size() << "\n";
        for (auto &Acc : *It.getSecond()) {
          dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << " --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined()) {
            if (Acc.getWrittenValue())
              dbgs() << " - c: " << *Acc.getWrittenValue() << "\n";
            else
              dbgs() << " - c: <unknown>\n";
          }
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
1552 ChangeStatus updateImpl(Attributor &A) override { 1553 return indicatePessimisticFixpoint(); 1554 } 1555 1556 /// See AbstractAttribute::trackStatistics() 1557 void trackStatistics() const override { 1558 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1559 } 1560 }; 1561 1562 struct AAPointerInfoArgument final : AAPointerInfoFloating { 1563 AAPointerInfoArgument(const IRPosition &IRP, Attributor &A) 1564 : AAPointerInfoFloating(IRP, A) {} 1565 1566 /// See AbstractAttribute::initialize(...). 1567 void initialize(Attributor &A) override { 1568 AAPointerInfoFloating::initialize(A); 1569 if (getAnchorScope()->isDeclaration()) 1570 indicatePessimisticFixpoint(); 1571 } 1572 1573 /// See AbstractAttribute::trackStatistics() 1574 void trackStatistics() const override { 1575 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1576 } 1577 }; 1578 1579 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating { 1580 AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A) 1581 : AAPointerInfoFloating(IRP, A) {} 1582 1583 /// See AbstractAttribute::updateImpl(...). 1584 ChangeStatus updateImpl(Attributor &A) override { 1585 using namespace AA::PointerInfo; 1586 // We handle memory intrinsics explicitly, at least the first (= 1587 // destination) and second (=source) arguments as we know how they are 1588 // accessed. 1589 if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) { 1590 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); 1591 int64_t LengthVal = OffsetAndSize::Unknown; 1592 if (Length) 1593 LengthVal = Length->getSExtValue(); 1594 Value &Ptr = getAssociatedValue(); 1595 unsigned ArgNo = getIRPosition().getCallSiteArgNo(); 1596 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1597 if (ArgNo == 0) { 1598 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed, 1599 nullptr, LengthVal); 1600 } else if (ArgNo == 1) { 1601 handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed, 1602 nullptr, LengthVal); 1603 } else { 1604 LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic " 1605 << *MI << "\n"); 1606 return indicatePessimisticFixpoint(); 1607 } 1608 return Changed; 1609 } 1610 1611 // TODO: Once we have call site specific value information we can provide 1612 // call site specific liveness information and then it makes 1613 // sense to specialize attributes for call sites arguments instead of 1614 // redirecting requests to the callee argument. 
1615 Argument *Arg = getAssociatedArgument(); 1616 if (!Arg) 1617 return indicatePessimisticFixpoint(); 1618 const IRPosition &ArgPos = IRPosition::argument(*Arg); 1619 auto &ArgAA = 1620 A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED); 1621 return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI())); 1622 } 1623 1624 /// See AbstractAttribute::trackStatistics() 1625 void trackStatistics() const override { 1626 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1627 } 1628 }; 1629 1630 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating { 1631 AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A) 1632 : AAPointerInfoFloating(IRP, A) {} 1633 1634 /// See AbstractAttribute::trackStatistics() 1635 void trackStatistics() const override { 1636 AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition()); 1637 } 1638 }; 1639 } // namespace 1640 1641 /// -----------------------NoUnwind Function Attribute-------------------------- 1642 1643 namespace { 1644 struct AANoUnwindImpl : AANoUnwind { 1645 AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {} 1646 1647 const std::string getAsStr() const override { 1648 return getAssumed() ? "nounwind" : "may-unwind"; 1649 } 1650 1651 /// See AbstractAttribute::updateImpl(...). 1652 ChangeStatus updateImpl(Attributor &A) override { 1653 auto Opcodes = { 1654 (unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 1655 (unsigned)Instruction::Call, (unsigned)Instruction::CleanupRet, 1656 (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume}; 1657 1658 auto CheckForNoUnwind = [&](Instruction &I) { 1659 if (!I.mayThrow()) 1660 return true; 1661 1662 if (const auto *CB = dyn_cast<CallBase>(&I)) { 1663 const auto &NoUnwindAA = A.getAAFor<AANoUnwind>( 1664 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED); 1665 return NoUnwindAA.isAssumedNoUnwind(); 1666 } 1667 return false; 1668 }; 1669 1670 bool UsedAssumedInformation = false; 1671 if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes, 1672 UsedAssumedInformation)) 1673 return indicatePessimisticFixpoint(); 1674 1675 return ChangeStatus::UNCHANGED; 1676 } 1677 }; 1678 1679 struct AANoUnwindFunction final : public AANoUnwindImpl { 1680 AANoUnwindFunction(const IRPosition &IRP, Attributor &A) 1681 : AANoUnwindImpl(IRP, A) {} 1682 1683 /// See AbstractAttribute::trackStatistics() 1684 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) } 1685 }; 1686 1687 /// NoUnwind attribute deduction for a call sites. 1688 struct AANoUnwindCallSite final : AANoUnwindImpl { 1689 AANoUnwindCallSite(const IRPosition &IRP, Attributor &A) 1690 : AANoUnwindImpl(IRP, A) {} 1691 1692 /// See AbstractAttribute::initialize(...). 1693 void initialize(Attributor &A) override { 1694 AANoUnwindImpl::initialize(A); 1695 Function *F = getAssociatedFunction(); 1696 if (!F || F->isDeclaration()) 1697 indicatePessimisticFixpoint(); 1698 } 1699 1700 /// See AbstractAttribute::updateImpl(...). 1701 ChangeStatus updateImpl(Attributor &A) override { 1702 // TODO: Once we have call site specific value information we can provide 1703 // call site specific liveness information and then it makes 1704 // sense to specialize attributes for call sites arguments instead of 1705 // redirecting requests to the callee argument. 
1706 Function *F = getAssociatedFunction(); 1707 const IRPosition &FnPos = IRPosition::function(*F); 1708 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED); 1709 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1710 } 1711 1712 /// See AbstractAttribute::trackStatistics() 1713 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } 1714 }; 1715 } // namespace 1716 1717 /// --------------------- Function Return Values ------------------------------- 1718 1719 namespace { 1720 /// "Attribute" that collects all potential returned values and the return 1721 /// instructions that they arise from. 1722 /// 1723 /// If there is a unique returned value R, the manifest method will: 1724 /// - mark R with the "returned" attribute, if R is an argument. 1725 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { 1726 1727 /// Mapping of values potentially returned by the associated function to the 1728 /// return instructions that might return them. 1729 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; 1730 1731 /// State flags 1732 /// 1733 ///{ 1734 bool IsFixed = false; 1735 bool IsValidState = true; 1736 ///} 1737 1738 public: 1739 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) 1740 : AAReturnedValues(IRP, A) {} 1741 1742 /// See AbstractAttribute::initialize(...). 1743 void initialize(Attributor &A) override { 1744 // Reset the state. 1745 IsFixed = false; 1746 IsValidState = true; 1747 ReturnedValues.clear(); 1748 1749 Function *F = getAssociatedFunction(); 1750 if (!F || F->isDeclaration()) { 1751 indicatePessimisticFixpoint(); 1752 return; 1753 } 1754 assert(!F->getReturnType()->isVoidTy() && 1755 "Did not expect a void return type!"); 1756 1757 // The map from instruction opcodes to those instructions in the function. 1758 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); 1759 1760 // Look through all arguments, if one is marked as returned we are done. 1761 for (Argument &Arg : F->args()) { 1762 if (Arg.hasReturnedAttr()) { 1763 auto &ReturnInstSet = ReturnedValues[&Arg]; 1764 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) 1765 for (Instruction *RI : *Insts) 1766 ReturnInstSet.insert(cast<ReturnInst>(RI)); 1767 1768 indicateOptimisticFixpoint(); 1769 return; 1770 } 1771 } 1772 1773 if (!A.isFunctionIPOAmendable(*F)) 1774 indicatePessimisticFixpoint(); 1775 } 1776 1777 /// See AbstractAttribute::manifest(...). 1778 ChangeStatus manifest(Attributor &A) override; 1779 1780 /// See AbstractAttribute::getState(...). 1781 AbstractState &getState() override { return *this; } 1782 1783 /// See AbstractAttribute::getState(...). 1784 const AbstractState &getState() const override { return *this; } 1785 1786 /// See AbstractAttribute::updateImpl(Attributor &A). 1787 ChangeStatus updateImpl(Attributor &A) override; 1788 1789 llvm::iterator_range<iterator> returned_values() override { 1790 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1791 } 1792 1793 llvm::iterator_range<const_iterator> returned_values() const override { 1794 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1795 } 1796 1797 /// Return the number of potential return values, -1 if unknown. 1798 size_t getNumReturnValues() const override { 1799 return isValidState() ? ReturnedValues.size() : -1; 1800 } 1801 1802 /// Return an assumed unique return value if a single candidate is found. If 1803 /// there cannot be one, return a nullptr. 
If it is not clear yet, return the 1804 /// Optional::NoneType. 1805 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; 1806 1807 /// See AbstractState::checkForAllReturnedValues(...). 1808 bool checkForAllReturnedValuesAndReturnInsts( 1809 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1810 const override; 1811 1812 /// Pretty print the attribute similar to the IR representation. 1813 const std::string getAsStr() const override; 1814 1815 /// See AbstractState::isAtFixpoint(). 1816 bool isAtFixpoint() const override { return IsFixed; } 1817 1818 /// See AbstractState::isValidState(). 1819 bool isValidState() const override { return IsValidState; } 1820 1821 /// See AbstractState::indicateOptimisticFixpoint(...). 1822 ChangeStatus indicateOptimisticFixpoint() override { 1823 IsFixed = true; 1824 return ChangeStatus::UNCHANGED; 1825 } 1826 1827 ChangeStatus indicatePessimisticFixpoint() override { 1828 IsFixed = true; 1829 IsValidState = false; 1830 return ChangeStatus::CHANGED; 1831 } 1832 }; 1833 1834 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { 1835 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1836 1837 // Bookkeeping. 1838 assert(isValidState()); 1839 STATS_DECLTRACK(KnownReturnValues, FunctionReturn, 1840 "Number of function with known return values"); 1841 1842 // Check if we have an assumed unique return value that we could manifest. 1843 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); 1844 1845 if (!UniqueRV.hasValue() || !UniqueRV.getValue()) 1846 return Changed; 1847 1848 // Bookkeeping. 1849 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, 1850 "Number of function with unique return"); 1851 // If the assumed unique return value is an argument, annotate it. 1852 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) { 1853 if (UniqueRVArg->getType()->canLosslesslyBitCastTo( 1854 getAssociatedFunction()->getReturnType())) { 1855 getIRPosition() = IRPosition::argument(*UniqueRVArg); 1856 Changed = IRAttribute::manifest(A); 1857 } 1858 } 1859 return Changed; 1860 } 1861 1862 const std::string AAReturnedValuesImpl::getAsStr() const { 1863 return (isAtFixpoint() ? "returns(#" : "may-return(#") + 1864 (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")"; 1865 } 1866 1867 Optional<Value *> 1868 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { 1869 // If checkForAllReturnedValues provides a unique value, ignoring potential 1870 // undef values that can also be present, it is assumed to be the actual 1871 // return value and forwarded to the caller of this method. If there are 1872 // multiple, a nullptr is returned indicating there cannot be a unique 1873 // returned value. 1874 Optional<Value *> UniqueRV; 1875 Type *Ty = getAssociatedFunction()->getReturnType(); 1876 1877 auto Pred = [&](Value &RV) -> bool { 1878 UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty); 1879 return UniqueRV != Optional<Value *>(nullptr); 1880 }; 1881 1882 if (!A.checkForAllReturnedValues(Pred, *this)) 1883 UniqueRV = nullptr; 1884 1885 return UniqueRV; 1886 } 1887 1888 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( 1889 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1890 const { 1891 if (!isValidState()) 1892 return false; 1893 1894 // Check all returned values but ignore call sites as long as we have not 1895 // encountered an overdefined one during an update. 
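  // For a function such as (illustrative example):
  //   define i32 @f(i1 %c, i32 %a, i32 %b) {
  //     br i1 %c, label %t, label %e
  //   t:
  //     ret i32 %a
  //   e:
  //     ret i32 %b
  //   }
  // ReturnedValues maps %a and %b to their respective `ret` instructions and
  // the predicate below is invoked once per mapping.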
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    assert(AA::isValidInScope(V, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[&V].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  bool UsedAssumedInformation = false;
  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I, UsedAssumedInformation, /* UseValueSimplify */ true,
        /* MaxValues */ 16,
        /* StripCB */ nullptr, /* Intraprocedural */ true);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
} // namespace

/// ------------------------ NoSync Function Attribute -------------------------

bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg.
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

namespace {
struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    return AA::isNoSyncInst(A, I, *this);
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // Non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
} // namespace

/// ------------------------ No-Free Attributes ----------------------------

namespace {
struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};
} // namespace

/// ------------------------ NonNull Argument Attribute ------------------------
namespace {
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We try to be smart and avoid looking through things we do not
  // like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
  if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
    return 0;

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
  if (Base && Base == &AssociatedValue) {
    int64_t DerefBytes = Loc->Size.getValue() + Offset;
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  /// Corner case when an offset is 0.
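  // E.g., a direct load of the associated pointer (illustrative):
  //   %v = load i32, ptr %p
  // has base %p and offset 0, so 4 dereferenceable bytes are known for %p.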
  Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
                                          /*AllowNonInbounds*/ true);
  if (Base && Base == &AssociatedValue && Offset == 0) {
    int64_t DerefBytes = Loc->Size.getValue();
    IsNonNull |= !NullPointerIsDefined;
    return std::max(int64_t(0), DerefBytes);
  }

  return 0;
}

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
} // namespace

/// ------------------------ No-Recurse Attributes ----------------------------

namespace {
struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
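    // E.g. (illustrative): if @f is only reachable from callers that are
    // themselves known norecurse,
    //   define void @caller() norecurse { ... call void @f() ... }
    // then no call chain can re-enter @f, and the predicate below succeeds
    // for every call site.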
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          DepClassTy::NONE);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool UsedAssumedInformation = false;
    if (A.checkForAllCallSites(CallSitePred, *this, true,
                               UsedAssumedInformation)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    const AAFunctionReachability &EdgeReachability =
        A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
                                           DepClassTy::REQUIRED);
    if (EdgeReachability.canReach(A, *getAnchorScope()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
} // namespace

/// -------------------- Undefined-Behavior Attributes ------------------------

namespace {
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef now states that a volatile store is not UB, so skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        // (1) Not having a value means it is dead. (We can replace the value
        //     with undef.)
        // (2) Simplified to undef. The argument violates the noundef
        //     attribute.
        // (3) Simplified to a null pointer where it is known to be nonnull.
        //     The argument is a poison value and violates the noundef
        //     attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      Optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue.hasValue() || !SimplifiedRetValue.getValue())
        return true;

      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
      //       We also ensure the return position is not "assumed dead"
      //       because the returned value was then potentially simplified to
      //       `undef` in AAReturnedValues without removing the `noundef`
      //       attribute yet.

      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
      // (1) The returned value is known to be undef.
      // (2) The value is known to be a null pointer and the returned
      //     position has the nonnull attribute (because the returned value is
      //     poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        auto &NonNullAA = A.getAAFor<AANonNull>(
            *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
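    // E.g. (illustrative): for
    //   define noundef nonnull ptr @g() { ret ptr null }
    // the `ret` returns poison where `noundef` is required, so it is known UB
    // and recorded by the callback above.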
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // ensures that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during updates in which, while processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
} // namespace

/// ------------------------ Will-Return Attributes ----------------------------

namespace {
// Helper function that checks whether a function contains any cycle that is
// not known to be bounded. Loops with a maximum trip count are considered
// bounded; any other cycle is not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we
  // conservatively assume any cycle is unbounded. We use scc_iterator, which
  // uses Tarjan's algorithm to find all the maximal SCCs. To detect if there's
  // a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
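  // E.g. (illustrative): a loop like `for (unsigned i = 0; i != 8; ++i)` has
  // a small constant max trip count, while `while (*p) ++p;` usually has no
  // computable max trip count and is treated as unbounded here.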
2942 for (auto *L : LI->getLoopsInPreorder()) { 2943 if (!SE->getSmallConstantMaxTripCount(L)) 2944 return true; 2945 } 2946 return false; 2947 } 2948 2949 struct AAWillReturnImpl : public AAWillReturn { 2950 AAWillReturnImpl(const IRPosition &IRP, Attributor &A) 2951 : AAWillReturn(IRP, A) {} 2952 2953 /// See AbstractAttribute::initialize(...). 2954 void initialize(Attributor &A) override { 2955 AAWillReturn::initialize(A); 2956 2957 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) { 2958 indicateOptimisticFixpoint(); 2959 return; 2960 } 2961 } 2962 2963 /// Check for `mustprogress` and `readonly` as they imply `willreturn`. 2964 bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) { 2965 // Check for `mustprogress` in the scope and the associated function which 2966 // might be different if this is a call site. 2967 if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) && 2968 (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress())) 2969 return false; 2970 2971 bool IsKnown; 2972 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 2973 return IsKnown || !KnownOnly; 2974 return false; 2975 } 2976 2977 /// See AbstractAttribute::updateImpl(...). 2978 ChangeStatus updateImpl(Attributor &A) override { 2979 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2980 return ChangeStatus::UNCHANGED; 2981 2982 auto CheckForWillReturn = [&](Instruction &I) { 2983 IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I)); 2984 const auto &WillReturnAA = 2985 A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED); 2986 if (WillReturnAA.isKnownWillReturn()) 2987 return true; 2988 if (!WillReturnAA.isAssumedWillReturn()) 2989 return false; 2990 const auto &NoRecurseAA = 2991 A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED); 2992 return NoRecurseAA.isAssumedNoRecurse(); 2993 }; 2994 2995 bool UsedAssumedInformation = false; 2996 if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this, 2997 UsedAssumedInformation)) 2998 return indicatePessimisticFixpoint(); 2999 3000 return ChangeStatus::UNCHANGED; 3001 } 3002 3003 /// See AbstractAttribute::getAsStr() 3004 const std::string getAsStr() const override { 3005 return getAssumed() ? "willreturn" : "may-noreturn"; 3006 } 3007 }; 3008 3009 struct AAWillReturnFunction final : AAWillReturnImpl { 3010 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 3011 : AAWillReturnImpl(IRP, A) {} 3012 3013 /// See AbstractAttribute::initialize(...). 3014 void initialize(Attributor &A) override { 3015 AAWillReturnImpl::initialize(A); 3016 3017 Function *F = getAnchorScope(); 3018 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 3019 indicatePessimisticFixpoint(); 3020 } 3021 3022 /// See AbstractAttribute::trackStatistics() 3023 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 3024 }; 3025 3026 /// WillReturn attribute deduction for a call sites. 3027 struct AAWillReturnCallSite final : AAWillReturnImpl { 3028 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 3029 : AAWillReturnImpl(IRP, A) {} 3030 3031 /// See AbstractAttribute::initialize(...). 3032 void initialize(Attributor &A) override { 3033 AAWillReturnImpl::initialize(A); 3034 Function *F = getAssociatedFunction(); 3035 if (!F || !A.isFunctionIPOAmendable(*F)) 3036 indicatePessimisticFixpoint(); 3037 } 3038 3039 /// See AbstractAttribute::updateImpl(...). 
3040 ChangeStatus updateImpl(Attributor &A) override { 3041 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 3042 return ChangeStatus::UNCHANGED; 3043 3044 // TODO: Once we have call site specific value information we can provide 3045 // call site specific liveness information and then it makes 3046 // sense to specialize attributes for call sites arguments instead of 3047 // redirecting requests to the callee argument. 3048 Function *F = getAssociatedFunction(); 3049 const IRPosition &FnPos = IRPosition::function(*F); 3050 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 3051 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3052 } 3053 3054 /// See AbstractAttribute::trackStatistics() 3055 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 3056 }; 3057 } // namespace 3058 3059 /// -------------------AAReachability Attribute-------------------------- 3060 3061 namespace { 3062 struct AAReachabilityImpl : AAReachability { 3063 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 3064 : AAReachability(IRP, A) {} 3065 3066 const std::string getAsStr() const override { 3067 // TODO: Return the number of reachable queries. 3068 return "reachable"; 3069 } 3070 3071 /// See AbstractAttribute::updateImpl(...). 3072 ChangeStatus updateImpl(Attributor &A) override { 3073 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 3074 *this, IRPosition::function(*getAnchorScope()), DepClassTy::REQUIRED); 3075 if (!NoRecurseAA.isAssumedNoRecurse()) 3076 return indicatePessimisticFixpoint(); 3077 return ChangeStatus::UNCHANGED; 3078 } 3079 }; 3080 3081 struct AAReachabilityFunction final : public AAReachabilityImpl { 3082 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 3083 : AAReachabilityImpl(IRP, A) {} 3084 3085 /// See AbstractAttribute::trackStatistics() 3086 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 3087 }; 3088 } // namespace 3089 3090 /// ------------------------ NoAlias Argument Attribute ------------------------ 3091 3092 namespace { 3093 struct AANoAliasImpl : AANoAlias { 3094 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 3095 assert(getAssociatedType()->isPointerTy() && 3096 "Noalias is a pointer attribute"); 3097 } 3098 3099 const std::string getAsStr() const override { 3100 return getAssumed() ? "noalias" : "may-alias"; 3101 } 3102 }; 3103 3104 /// NoAlias attribute for a floating value. 3105 struct AANoAliasFloating final : AANoAliasImpl { 3106 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 3107 : AANoAliasImpl(IRP, A) {} 3108 3109 /// See AbstractAttribute::initialize(...). 
3110 void initialize(Attributor &A) override { 3111 AANoAliasImpl::initialize(A); 3112 Value *Val = &getAssociatedValue(); 3113 do { 3114 CastInst *CI = dyn_cast<CastInst>(Val); 3115 if (!CI) 3116 break; 3117 Value *Base = CI->getOperand(0); 3118 if (!Base->hasOneUse()) 3119 break; 3120 Val = Base; 3121 } while (true); 3122 3123 if (!Val->getType()->isPointerTy()) { 3124 indicatePessimisticFixpoint(); 3125 return; 3126 } 3127 3128 if (isa<AllocaInst>(Val)) 3129 indicateOptimisticFixpoint(); 3130 else if (isa<ConstantPointerNull>(Val) && 3131 !NullPointerIsDefined(getAnchorScope(), 3132 Val->getType()->getPointerAddressSpace())) 3133 indicateOptimisticFixpoint(); 3134 else if (Val != &getAssociatedValue()) { 3135 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>( 3136 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL); 3137 if (ValNoAliasAA.isKnownNoAlias()) 3138 indicateOptimisticFixpoint(); 3139 } 3140 } 3141 3142 /// See AbstractAttribute::updateImpl(...). 3143 ChangeStatus updateImpl(Attributor &A) override { 3144 // TODO: Implement this. 3145 return indicatePessimisticFixpoint(); 3146 } 3147 3148 /// See AbstractAttribute::trackStatistics() 3149 void trackStatistics() const override { 3150 STATS_DECLTRACK_FLOATING_ATTR(noalias) 3151 } 3152 }; 3153 3154 /// NoAlias attribute for an argument. 3155 struct AANoAliasArgument final 3156 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 3157 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 3158 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3159 3160 /// See AbstractAttribute::initialize(...). 3161 void initialize(Attributor &A) override { 3162 Base::initialize(A); 3163 // See callsite argument attribute and callee argument attribute. 3164 if (hasAttr({Attribute::ByVal})) 3165 indicateOptimisticFixpoint(); 3166 } 3167 3168 /// See AbstractAttribute::update(...). 3169 ChangeStatus updateImpl(Attributor &A) override { 3170 // We have to make sure no-alias on the argument does not break 3171 // synchronization when this is a callback argument, see also [1] below. 3172 // If synchronization cannot be affected, we delegate to the base updateImpl 3173 // function, otherwise we give up for now. 3174 3175 // If the function is no-sync, no-alias cannot break synchronization. 3176 const auto &NoSyncAA = 3177 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), 3178 DepClassTy::OPTIONAL); 3179 if (NoSyncAA.isAssumedNoSync()) 3180 return Base::updateImpl(A); 3181 3182 // If the argument is read-only, no-alias cannot break synchronization. 3183 bool IsKnown; 3184 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 3185 return Base::updateImpl(A); 3186 3187 // If the argument is never passed through callbacks, no-alias cannot break 3188 // synchronization. 3189 bool UsedAssumedInformation = false; 3190 if (A.checkForAllCallSites( 3191 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 3192 true, UsedAssumedInformation)) 3193 return Base::updateImpl(A); 3194 3195 // TODO: add no-alias but make sure it doesn't break synchronization by 3196 // introducing fake uses. See: 3197 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. 
Finkel, 3198 // International Workshop on OpenMP 2018, 3199 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 3200 3201 return indicatePessimisticFixpoint(); 3202 } 3203 3204 /// See AbstractAttribute::trackStatistics() 3205 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 3206 }; 3207 3208 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 3209 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 3210 : AANoAliasImpl(IRP, A) {} 3211 3212 /// See AbstractAttribute::initialize(...). 3213 void initialize(Attributor &A) override { 3214 // See callsite argument attribute and callee argument attribute. 3215 const auto &CB = cast<CallBase>(getAnchorValue()); 3216 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias)) 3217 indicateOptimisticFixpoint(); 3218 Value &Val = getAssociatedValue(); 3219 if (isa<ConstantPointerNull>(Val) && 3220 !NullPointerIsDefined(getAnchorScope(), 3221 Val.getType()->getPointerAddressSpace())) 3222 indicateOptimisticFixpoint(); 3223 } 3224 3225 /// Determine if the underlying value may alias with the call site argument 3226 /// \p OtherArgNo of \p ICS (= the underlying call site). 3227 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 3228 const AAMemoryBehavior &MemBehaviorAA, 3229 const CallBase &CB, unsigned OtherArgNo) { 3230 // We do not need to worry about aliasing with the underlying IRP. 3231 if (this->getCalleeArgNo() == (int)OtherArgNo) 3232 return false; 3233 3234 // If it is not a pointer or pointer vector we do not alias. 3235 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 3236 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 3237 return false; 3238 3239 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 3240 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE); 3241 3242 // If the argument is readnone, there is no read-write aliasing. 3243 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 3244 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3245 return false; 3246 } 3247 3248 // If the argument is readonly and the underlying value is readonly, there 3249 // is no read-write aliasing. 3250 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 3251 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 3252 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3253 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3254 return false; 3255 } 3256 3257 // We have to utilize actual alias analysis queries so we need the object. 3258 if (!AAR) 3259 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 3260 3261 // Try to rule it out at the call site. 3262 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 3263 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 3264 "callsite arguments: " 3265 << getAssociatedValue() << " " << *ArgOp << " => " 3266 << (IsAliasing ? "" : "no-") << "alias \n"); 3267 3268 return IsAliasing; 3269 } 3270 3271 bool 3272 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 3273 const AAMemoryBehavior &MemBehaviorAA, 3274 const AANoAlias &NoAliasAA) { 3275 // We can deduce "noalias" if the following conditions hold. 3276 // (i) Associated value is assumed to be noalias in the definition. 3277 // (ii) Associated value is assumed to be no-capture in all the uses 3278 // possibly executed before this callsite. 3279 // (iii) There is no other pointer argument which could alias with the 3280 // value. 
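    //
    // For example (illustrative IR only), given
    //   %p = call noalias i8* @malloc(i64 4)
    //   call void @use(i8* %p, i8* %q)
    // condition (i) holds for %p via the noalias return of @malloc, (ii)
    // requires that %p is not captured before the `use` call site, and (iii)
    // requires that %q cannot alias %p.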
3281 3282 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 3283 if (!AssociatedValueIsNoAliasAtDef) { 3284 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 3285 << " is not no-alias at the definition\n"); 3286 return false; 3287 } 3288 3289 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 3290 3291 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3292 const Function *ScopeFn = VIRP.getAnchorScope(); 3293 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); 3294 // Check whether the value is captured in the scope using AANoCapture. 3295 // Look at CFG and check only uses possibly executed before this 3296 // callsite. 3297 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 3298 Instruction *UserI = cast<Instruction>(U.getUser()); 3299 3300 // If UserI is the curr instruction and there is a single potential use of 3301 // the value in UserI we allow the use. 3302 // TODO: We should inspect the operands and allow those that cannot alias 3303 // with the value. 3304 if (UserI == getCtxI() && UserI->getNumOperands() == 1) 3305 return true; 3306 3307 if (ScopeFn) { 3308 const auto &ReachabilityAA = A.getAAFor<AAReachability>( 3309 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL); 3310 3311 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 3312 return true; 3313 3314 if (auto *CB = dyn_cast<CallBase>(UserI)) { 3315 if (CB->isArgOperand(&U)) { 3316 3317 unsigned ArgNo = CB->getArgOperandNo(&U); 3318 3319 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 3320 *this, IRPosition::callsite_argument(*CB, ArgNo), 3321 DepClassTy::OPTIONAL); 3322 3323 if (NoCaptureAA.isAssumedNoCapture()) 3324 return true; 3325 } 3326 } 3327 } 3328 3329 // For cases which can potentially have more users 3330 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 3331 isa<SelectInst>(U)) { 3332 Follow = true; 3333 return true; 3334 } 3335 3336 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 3337 return false; 3338 }; 3339 3340 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 3341 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 3342 LLVM_DEBUG( 3343 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 3344 << " cannot be noalias as it is potentially captured\n"); 3345 return false; 3346 } 3347 } 3348 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 3349 3350 // Check there is no other pointer argument which could alias with the 3351 // value passed at this call site. 3352 // TODO: AbstractCallSite 3353 const auto &CB = cast<CallBase>(getAnchorValue()); 3354 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) 3355 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 3356 return false; 3357 3358 return true; 3359 } 3360 3361 /// See AbstractAttribute::updateImpl(...). 3362 ChangeStatus updateImpl(Attributor &A) override { 3363 // If the argument is readnone we are done as there are no accesses via the 3364 // argument. 
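    // E.g., a pointer argument that is only compared against null or passed
    // along without ever being dereferenced can be readnone; with no accesses
    // through it, aliasing is irrelevant.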
3365 auto &MemBehaviorAA = 3366 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 3367 if (MemBehaviorAA.isAssumedReadNone()) { 3368 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3369 return ChangeStatus::UNCHANGED; 3370 } 3371 3372 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3373 const auto &NoAliasAA = 3374 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); 3375 3376 AAResults *AAR = nullptr; 3377 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 3378 NoAliasAA)) { 3379 LLVM_DEBUG( 3380 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 3381 return ChangeStatus::UNCHANGED; 3382 } 3383 3384 return indicatePessimisticFixpoint(); 3385 } 3386 3387 /// See AbstractAttribute::trackStatistics() 3388 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 3389 }; 3390 3391 /// NoAlias attribute for function return value. 3392 struct AANoAliasReturned final : AANoAliasImpl { 3393 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 3394 : AANoAliasImpl(IRP, A) {} 3395 3396 /// See AbstractAttribute::initialize(...). 3397 void initialize(Attributor &A) override { 3398 AANoAliasImpl::initialize(A); 3399 Function *F = getAssociatedFunction(); 3400 if (!F || F->isDeclaration()) 3401 indicatePessimisticFixpoint(); 3402 } 3403 3404 /// See AbstractAttribute::updateImpl(...). 3405 virtual ChangeStatus updateImpl(Attributor &A) override { 3406 3407 auto CheckReturnValue = [&](Value &RV) -> bool { 3408 if (Constant *C = dyn_cast<Constant>(&RV)) 3409 if (C->isNullValue() || isa<UndefValue>(C)) 3410 return true; 3411 3412 /// For now, we can only deduce noalias if we have call sites. 3413 /// FIXME: add more support. 3414 if (!isa<CallBase>(&RV)) 3415 return false; 3416 3417 const IRPosition &RVPos = IRPosition::value(RV); 3418 const auto &NoAliasAA = 3419 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); 3420 if (!NoAliasAA.isAssumedNoAlias()) 3421 return false; 3422 3423 const auto &NoCaptureAA = 3424 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); 3425 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 3426 }; 3427 3428 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 3429 return indicatePessimisticFixpoint(); 3430 3431 return ChangeStatus::UNCHANGED; 3432 } 3433 3434 /// See AbstractAttribute::trackStatistics() 3435 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 3436 }; 3437 3438 /// NoAlias attribute deduction for a call site return value. 3439 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 3440 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 3441 : AANoAliasImpl(IRP, A) {} 3442 3443 /// See AbstractAttribute::initialize(...). 3444 void initialize(Attributor &A) override { 3445 AANoAliasImpl::initialize(A); 3446 Function *F = getAssociatedFunction(); 3447 if (!F || F->isDeclaration()) 3448 indicatePessimisticFixpoint(); 3449 } 3450 3451 /// See AbstractAttribute::updateImpl(...). 3452 ChangeStatus updateImpl(Attributor &A) override { 3453 // TODO: Once we have call site specific value information we can provide 3454 // call site specific liveness information and then it makes 3455 // sense to specialize attributes for call sites arguments instead of 3456 // redirecting requests to the callee argument. 
3457 Function *F = getAssociatedFunction(); 3458 const IRPosition &FnPos = IRPosition::returned(*F); 3459 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3460 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3461 } 3462 3463 /// See AbstractAttribute::trackStatistics() 3464 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3465 }; 3466 } // namespace 3467 3468 /// -------------------AAIsDead Function Attribute----------------------- 3469 3470 namespace { 3471 struct AAIsDeadValueImpl : public AAIsDead { 3472 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3473 3474 /// See AbstractAttribute::initialize(...). 3475 void initialize(Attributor &A) override { 3476 if (auto *Scope = getAnchorScope()) 3477 if (!A.isRunOn(*Scope)) 3478 indicatePessimisticFixpoint(); 3479 } 3480 3481 /// See AAIsDead::isAssumedDead(). 3482 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3483 3484 /// See AAIsDead::isKnownDead(). 3485 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3486 3487 /// See AAIsDead::isAssumedDead(BasicBlock *). 3488 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3489 3490 /// See AAIsDead::isKnownDead(BasicBlock *). 3491 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3492 3493 /// See AAIsDead::isAssumedDead(Instruction *I). 3494 bool isAssumedDead(const Instruction *I) const override { 3495 return I == getCtxI() && isAssumedDead(); 3496 } 3497 3498 /// See AAIsDead::isKnownDead(Instruction *I). 3499 bool isKnownDead(const Instruction *I) const override { 3500 return isAssumedDead(I) && isKnownDead(); 3501 } 3502 3503 /// See AbstractAttribute::getAsStr(). 3504 virtual const std::string getAsStr() const override { 3505 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3506 } 3507 3508 /// Check if all uses are assumed dead. 3509 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3510 // Callers might not check the type, void has no uses. 3511 if (V.getType()->isVoidTy() || V.use_empty()) 3512 return true; 3513 3514 // If we replace a value with a constant there are no uses left afterwards. 3515 if (!isa<Constant>(V)) { 3516 if (auto *I = dyn_cast<Instruction>(&V)) 3517 if (!A.isRunOn(*I->getFunction())) 3518 return false; 3519 bool UsedAssumedInformation = false; 3520 Optional<Constant *> C = 3521 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3522 if (!C.hasValue() || *C) 3523 return true; 3524 } 3525 3526 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3527 // Explicitly set the dependence class to required because we want a long 3528 // chain of N dependent instructions to be considered live as soon as one is 3529 // without going through N update cycles. This is not required for 3530 // correctness. 3531 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3532 DepClassTy::REQUIRED); 3533 } 3534 3535 /// Determine if \p I is assumed to be side-effect free. 
3536 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 3537 if (!I || wouldInstructionBeTriviallyDead(I)) 3538 return true; 3539 3540 auto *CB = dyn_cast<CallBase>(I); 3541 if (!CB || isa<IntrinsicInst>(CB)) 3542 return false; 3543 3544 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 3545 const auto &NoUnwindAA = 3546 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE); 3547 if (!NoUnwindAA.isAssumedNoUnwind()) 3548 return false; 3549 if (!NoUnwindAA.isKnownNoUnwind()) 3550 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 3551 3552 bool IsKnown; 3553 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown); 3554 } 3555 }; 3556 3557 struct AAIsDeadFloating : public AAIsDeadValueImpl { 3558 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 3559 : AAIsDeadValueImpl(IRP, A) {} 3560 3561 /// See AbstractAttribute::initialize(...). 3562 void initialize(Attributor &A) override { 3563 AAIsDeadValueImpl::initialize(A); 3564 3565 if (isa<UndefValue>(getAssociatedValue())) { 3566 indicatePessimisticFixpoint(); 3567 return; 3568 } 3569 3570 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3571 if (!isAssumedSideEffectFree(A, I)) { 3572 if (!isa_and_nonnull<StoreInst>(I)) 3573 indicatePessimisticFixpoint(); 3574 else 3575 removeAssumedBits(HAS_NO_EFFECT); 3576 } 3577 } 3578 3579 bool isDeadStore(Attributor &A, StoreInst &SI) { 3580 // Lang ref now states volatile store is not UB/dead, let's skip them. 3581 if (SI.isVolatile()) 3582 return false; 3583 3584 bool UsedAssumedInformation = false; 3585 SmallSetVector<Value *, 4> PotentialCopies; 3586 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this, 3587 UsedAssumedInformation)) 3588 return false; 3589 return llvm::all_of(PotentialCopies, [&](Value *V) { 3590 return A.isAssumedDead(IRPosition::value(*V), this, nullptr, 3591 UsedAssumedInformation); 3592 }); 3593 } 3594 3595 /// See AbstractAttribute::getAsStr(). 3596 const std::string getAsStr() const override { 3597 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3598 if (isa_and_nonnull<StoreInst>(I)) 3599 if (isValidState()) 3600 return "assumed-dead-store"; 3601 return AAIsDeadValueImpl::getAsStr(); 3602 } 3603 3604 /// See AbstractAttribute::updateImpl(...). 3605 ChangeStatus updateImpl(Attributor &A) override { 3606 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3607 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3608 if (!isDeadStore(A, *SI)) 3609 return indicatePessimisticFixpoint(); 3610 } else { 3611 if (!isAssumedSideEffectFree(A, I)) 3612 return indicatePessimisticFixpoint(); 3613 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3614 return indicatePessimisticFixpoint(); 3615 } 3616 return ChangeStatus::UNCHANGED; 3617 } 3618 3619 /// See AbstractAttribute::manifest(...). 3620 ChangeStatus manifest(Attributor &A) override { 3621 Value &V = getAssociatedValue(); 3622 if (auto *I = dyn_cast<Instruction>(&V)) { 3623 // If we get here we basically know the users are all dead. We check if 3624 // isAssumedSideEffectFree returns true here again because it might not be 3625 // the case and only the users are dead but the instruction (=call) is 3626 // still needed. 
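      // E.g., in `%r = call i32 @f()` all users of %r can be dead while the
      // call itself still writes memory; then %r is dead but the call is not.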
3627 if (isa<StoreInst>(I) || 3628 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) { 3629 A.deleteAfterManifest(*I); 3630 return ChangeStatus::CHANGED; 3631 } 3632 } 3633 return ChangeStatus::UNCHANGED; 3634 } 3635 3636 /// See AbstractAttribute::trackStatistics() 3637 void trackStatistics() const override { 3638 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 3639 } 3640 }; 3641 3642 struct AAIsDeadArgument : public AAIsDeadFloating { 3643 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 3644 : AAIsDeadFloating(IRP, A) {} 3645 3646 /// See AbstractAttribute::initialize(...). 3647 void initialize(Attributor &A) override { 3648 AAIsDeadFloating::initialize(A); 3649 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 3650 indicatePessimisticFixpoint(); 3651 } 3652 3653 /// See AbstractAttribute::manifest(...). 3654 ChangeStatus manifest(Attributor &A) override { 3655 Argument &Arg = *getAssociatedArgument(); 3656 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 3657 if (A.registerFunctionSignatureRewrite( 3658 Arg, /* ReplacementTypes */ {}, 3659 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 3660 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 3661 return ChangeStatus::CHANGED; 3662 } 3663 return ChangeStatus::UNCHANGED; 3664 } 3665 3666 /// See AbstractAttribute::trackStatistics() 3667 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 3668 }; 3669 3670 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 3671 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 3672 : AAIsDeadValueImpl(IRP, A) {} 3673 3674 /// See AbstractAttribute::initialize(...). 3675 void initialize(Attributor &A) override { 3676 AAIsDeadValueImpl::initialize(A); 3677 if (isa<UndefValue>(getAssociatedValue())) 3678 indicatePessimisticFixpoint(); 3679 } 3680 3681 /// See AbstractAttribute::updateImpl(...). 3682 ChangeStatus updateImpl(Attributor &A) override { 3683 // TODO: Once we have call site specific value information we can provide 3684 // call site specific liveness information and then it makes 3685 // sense to specialize attributes for call sites arguments instead of 3686 // redirecting requests to the callee argument. 3687 Argument *Arg = getAssociatedArgument(); 3688 if (!Arg) 3689 return indicatePessimisticFixpoint(); 3690 const IRPosition &ArgPos = IRPosition::argument(*Arg); 3691 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED); 3692 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 3693 } 3694 3695 /// See AbstractAttribute::manifest(...). 3696 ChangeStatus manifest(Attributor &A) override { 3697 CallBase &CB = cast<CallBase>(getAnchorValue()); 3698 Use &U = CB.getArgOperandUse(getCallSiteArgNo()); 3699 assert(!isa<UndefValue>(U.get()) && 3700 "Expected undef values to be filtered out!"); 3701 UndefValue &UV = *UndefValue::get(U->getType()); 3702 if (A.changeUseAfterManifest(U, UV)) 3703 return ChangeStatus::CHANGED; 3704 return ChangeStatus::UNCHANGED; 3705 } 3706 3707 /// See AbstractAttribute::trackStatistics() 3708 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 3709 }; 3710 3711 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 3712 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 3713 : AAIsDeadFloating(IRP, A) {} 3714 3715 /// See AAIsDead::isAssumedDead(). 
3716 bool isAssumedDead() const override { 3717 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3718 } 3719 3720 /// See AbstractAttribute::initialize(...). 3721 void initialize(Attributor &A) override { 3722 AAIsDeadFloating::initialize(A); 3723 if (isa<UndefValue>(getAssociatedValue())) { 3724 indicatePessimisticFixpoint(); 3725 return; 3726 } 3727 3728 // We track this separately as a secondary state. 3729 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3730 } 3731 3732 /// See AbstractAttribute::updateImpl(...). 3733 ChangeStatus updateImpl(Attributor &A) override { 3734 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3735 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3736 IsAssumedSideEffectFree = false; 3737 Changed = ChangeStatus::CHANGED; 3738 } 3739 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3740 return indicatePessimisticFixpoint(); 3741 return Changed; 3742 } 3743 3744 /// See AbstractAttribute::trackStatistics() 3745 void trackStatistics() const override { 3746 if (IsAssumedSideEffectFree) 3747 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3748 else 3749 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3750 } 3751 3752 /// See AbstractAttribute::getAsStr(). 3753 const std::string getAsStr() const override { 3754 return isAssumedDead() 3755 ? "assumed-dead" 3756 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3757 } 3758 3759 private: 3760 bool IsAssumedSideEffectFree = true; 3761 }; 3762 3763 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3764 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3765 : AAIsDeadValueImpl(IRP, A) {} 3766 3767 /// See AbstractAttribute::updateImpl(...). 3768 ChangeStatus updateImpl(Attributor &A) override { 3769 3770 bool UsedAssumedInformation = false; 3771 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3772 {Instruction::Ret}, UsedAssumedInformation); 3773 3774 auto PredForCallSite = [&](AbstractCallSite ACS) { 3775 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3776 return false; 3777 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3778 }; 3779 3780 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3781 UsedAssumedInformation)) 3782 return indicatePessimisticFixpoint(); 3783 3784 return ChangeStatus::UNCHANGED; 3785 } 3786 3787 /// See AbstractAttribute::manifest(...). 3788 ChangeStatus manifest(Attributor &A) override { 3789 // TODO: Rewrite the signature to return void? 3790 bool AnyChange = false; 3791 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3792 auto RetInstPred = [&](Instruction &I) { 3793 ReturnInst &RI = cast<ReturnInst>(I); 3794 if (!isa<UndefValue>(RI.getReturnValue())) 3795 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3796 return true; 3797 }; 3798 bool UsedAssumedInformation = false; 3799 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3800 UsedAssumedInformation); 3801 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3802 } 3803 3804 /// See AbstractAttribute::trackStatistics() 3805 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3806 }; 3807 3808 struct AAIsDeadFunction : public AAIsDead { 3809 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3810 3811 /// See AbstractAttribute::initialize(...). 
3812 void initialize(Attributor &A) override { 3813 Function *F = getAnchorScope(); 3814 if (!F || F->isDeclaration() || !A.isRunOn(*F)) { 3815 indicatePessimisticFixpoint(); 3816 return; 3817 } 3818 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3819 assumeLive(A, F->getEntryBlock()); 3820 } 3821 3822 /// See AbstractAttribute::getAsStr(). 3823 const std::string getAsStr() const override { 3824 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3825 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3826 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3827 std::to_string(KnownDeadEnds.size()) + "]"; 3828 } 3829 3830 /// See AbstractAttribute::manifest(...). 3831 ChangeStatus manifest(Attributor &A) override { 3832 assert(getState().isValidState() && 3833 "Attempted to manifest an invalid state!"); 3834 3835 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3836 Function &F = *getAnchorScope(); 3837 3838 if (AssumedLiveBlocks.empty()) { 3839 A.deleteAfterManifest(F); 3840 return ChangeStatus::CHANGED; 3841 } 3842 3843 // Flag to determine if we can change an invoke to a call assuming the 3844 // callee is nounwind. This is not possible if the personality of the 3845 // function allows to catch asynchronous exceptions. 3846 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3847 3848 KnownDeadEnds.set_union(ToBeExploredFrom); 3849 for (const Instruction *DeadEndI : KnownDeadEnds) { 3850 auto *CB = dyn_cast<CallBase>(DeadEndI); 3851 if (!CB) 3852 continue; 3853 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3854 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3855 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3856 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3857 continue; 3858 3859 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3860 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3861 else 3862 A.changeToUnreachableAfterManifest( 3863 const_cast<Instruction *>(DeadEndI->getNextNode())); 3864 HasChanged = ChangeStatus::CHANGED; 3865 } 3866 3867 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3868 for (BasicBlock &BB : F) 3869 if (!AssumedLiveBlocks.count(&BB)) { 3870 A.deleteAfterManifest(BB); 3871 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3872 HasChanged = ChangeStatus::CHANGED; 3873 } 3874 3875 return HasChanged; 3876 } 3877 3878 /// See AbstractAttribute::updateImpl(...). 3879 ChangeStatus updateImpl(Attributor &A) override; 3880 3881 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3882 assert(From->getParent() == getAnchorScope() && 3883 To->getParent() == getAnchorScope() && 3884 "Used AAIsDead of the wrong function"); 3885 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To)); 3886 } 3887 3888 /// See AbstractAttribute::trackStatistics() 3889 void trackStatistics() const override {} 3890 3891 /// Returns true if the function is assumed dead. 3892 bool isAssumedDead() const override { return false; } 3893 3894 /// See AAIsDead::isKnownDead(). 3895 bool isKnownDead() const override { return false; } 3896 3897 /// See AAIsDead::isAssumedDead(BasicBlock *). 3898 bool isAssumedDead(const BasicBlock *BB) const override { 3899 assert(BB->getParent() == getAnchorScope() && 3900 "BB must be in the same anchor scope function."); 3901 3902 if (!getAssumed()) 3903 return false; 3904 return !AssumedLiveBlocks.count(BB); 3905 } 3906 3907 /// See AAIsDead::isKnownDead(BasicBlock *). 
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls to
    // internal functions we will assume that those are now live as well. This
    // is a performance optimization for blocks with calls to a lot of internal
    // functions. It can however cause dead functions to be treated as live.
    for (const Instruction &I : BB)
      if (const auto *CB = dyn_cast<CallBase>(&I))
        if (const Function *F = CB->getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live edges.
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};

static bool
identifyAliveSuccessors(Attributor &A, const CallBase &CB,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  const IRPosition &IPos = IRPosition::callsite_function(CB);

  const auto &NoReturnAA =
      A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
  if (NoReturnAA.isAssumedNoReturn())
    return !NoReturnAA.isKnownNoReturn();
  if (CB.isTerminator())
    AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
  else
    AliveSuccessors.push_back(CB.getNextNode());
  return false;
}

static bool
identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation =
      identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);

  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
    AliveSuccessors.push_back(&II.getUnwindDest()->front());
  } else {
    const IRPosition &IPos = IRPosition::callsite_function(II);
    const auto &AANoUnw =
        A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
    if (AANoUnw.isAssumedNoUnwind()) {
      UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
    } else {
      AliveSuccessors.push_back(&II.getUnwindDest()->front());
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  if (BI.getNumSuccessors() == 1) {
    AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
  } else {
    Optional<Constant *> C =
        A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
    if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
      // No value yet, assume both edges are dead.
    } else if (isa_and_nonnull<ConstantInt>(*C)) {
      const BasicBlock *SuccBB =
          BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
      AliveSuccessors.push_back(&SuccBB->front());
    } else {
      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
      AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
      UsedAssumedInformation = false;
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  Optional<Constant *> C =
      A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
    // No value yet, assume all edges are dead.
  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
    for (auto &CaseIt : SI.cases()) {
      if (CaseIt.getCaseValue() == C.getValue()) {
        AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
        return UsedAssumedInformation;
      }
    }
    AliveSuccessors.push_back(&SI.getDefaultDest()->front());
    return UsedAssumedInformation;
  } else {
    for (const BasicBlock *SuccBB : successors(SI.getParent()))
      AliveSuccessors.push_back(&SuccBB->front());
  }
  return UsedAssumedInformation;
}

ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAnchorScope()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
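  // Conceptually this is a forward reachability fixpoint: starting from the
  // known exploration points we only follow successors that are assumed to be
  // reachable, and we revisit instructions for which assumed (rather than
  // known) information was used.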
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Fast forward for uninteresting instructions. We could look for UB here
    // though.
    while (!I->isTerminator() && !isa<CallBase>(I))
      I = I->getNextNode();

    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else if (AliveSuccessors.empty() ||
               (I->isTerminator() &&
                AliveSuccessors.size() < I->getNumSuccessors())) {
      if (KnownDeadEnds.insert(I))
        Change = ChangeStatus::CHANGED;
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Record the assumed live edge.
        auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
        if (AssumedLiveEdges.insert(Edge).second)
          Change = ChangeStatus::CHANGED;
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  // Check if the content of ToBeExploredFrom changed, ignore the order.
  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
      llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
        return !ToBeExploredFrom.count(I);
      })) {
    Change = ChangeStatus::CHANGED;
    ToBeExploredFrom = std::move(NewToBeExploredFrom);
  }

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
} // namespace

/// -------------------- Dereferenceable Argument Attribute --------------------

namespace {
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
      : AADereferenceable(IRP, A) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs, /* IgnoreSubsumingPositions */ false, &A);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    const IRPosition &IRP = this->getIRPosition();
    NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);

    bool CanBeNull, CanBeFreed;
    takeKnownDerefBytesMaximum(
        IRP.getAssociatedValue().getPointerDereferenceableBytes(
            A.getDataLayout(), CanBeNull, CanBeFreed));

    bool IsFnInterface = IRP.isFnInterfaceKind();
    Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
                              DerefState &State) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
    if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
      return;

    int64_t Offset;
    const Value *Base = GetPointerBaseWithConstantOffset(
        Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
    if (Base && Base == &getAssociatedValue())
      State.addAccessedBytes(Offset, Loc->Size.getValue());
  }

  /// See followUsesInMBEC
bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4256 AADereferenceable::StateType &State) { 4257 bool IsNonNull = false; 4258 bool TrackUse = false; 4259 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4260 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4261 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4262 << " for instruction " << *I << "\n"); 4263 4264 addAccessedBytesForUse(A, U, I, State); 4265 State.takeKnownDerefBytesMaximum(DerefBytes); 4266 return TrackUse; 4267 } 4268 4269 /// See AbstractAttribute::manifest(...). 4270 ChangeStatus manifest(Attributor &A) override { 4271 ChangeStatus Change = AADereferenceable::manifest(A); 4272 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 4273 removeAttrs({Attribute::DereferenceableOrNull}); 4274 return ChangeStatus::CHANGED; 4275 } 4276 return Change; 4277 } 4278 4279 void getDeducedAttributes(LLVMContext &Ctx, 4280 SmallVectorImpl<Attribute> &Attrs) const override { 4281 // TODO: Add *_globally support 4282 if (isAssumedNonNull()) 4283 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 4284 Ctx, getAssumedDereferenceableBytes())); 4285 else 4286 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 4287 Ctx, getAssumedDereferenceableBytes())); 4288 } 4289 4290 /// See AbstractAttribute::getAsStr(). 4291 const std::string getAsStr() const override { 4292 if (!getAssumedDereferenceableBytes()) 4293 return "unknown-dereferenceable"; 4294 return std::string("dereferenceable") + 4295 (isAssumedNonNull() ? "" : "_or_null") + 4296 (isAssumedGlobal() ? "_globally" : "") + "<" + 4297 std::to_string(getKnownDereferenceableBytes()) + "-" + 4298 std::to_string(getAssumedDereferenceableBytes()) + ">"; 4299 } 4300 }; 4301 4302 /// Dereferenceable attribute for a floating value. 4303 struct AADereferenceableFloating : AADereferenceableImpl { 4304 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 4305 : AADereferenceableImpl(IRP, A) {} 4306 4307 /// See AbstractAttribute::updateImpl(...). 4308 ChangeStatus updateImpl(Attributor &A) override { 4309 const DataLayout &DL = A.getDataLayout(); 4310 4311 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 4312 bool Stripped) -> bool { 4313 unsigned IdxWidth = 4314 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 4315 APInt Offset(IdxWidth, 0); 4316 const Value *Base = stripAndAccumulateOffsets( 4317 A, *this, &V, DL, Offset, /* GetMinOffset */ false, 4318 /* AllowNonInbounds */ true); 4319 4320 const auto &AA = A.getAAFor<AADereferenceable>( 4321 *this, IRPosition::value(*Base), DepClassTy::REQUIRED); 4322 int64_t DerefBytes = 0; 4323 if (!Stripped && this == &AA) { 4324 // Use IR information if we did not strip anything. 4325 // TODO: track globally. 4326 bool CanBeNull, CanBeFreed; 4327 DerefBytes = 4328 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 4329 T.GlobalState.indicatePessimisticFixpoint(); 4330 } else { 4331 const DerefState &DS = AA.getState(); 4332 DerefBytes = DS.DerefBytesState.getAssumed(); 4333 T.GlobalState &= DS.GlobalState; 4334 } 4335 4336 // For now we do not try to "increase" dereferenceability due to negative 4337 // indices as we first have to come up with code to deal with loops and 4338 // for overflows of the dereferenceable bytes. 
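      // E.g., even if the base %p is known dereferenceable(8), for
      //   %q = getelementptr inbounds i8, i8* %p, i64 -4
      // we clamp the negative offset to 0 below and claim at most the 8 bytes
      // known for %p, rather than reasoning about the bytes before %p.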
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop, which would slowly
          // drive them down to the known value; short-circuit that here.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
                                           VisitValueCB, getCtxI(),
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
            IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable,
                                      AADereferenceableImpl> {
  using Base =
      AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AADereferenceableFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
4421 struct AADereferenceableCallSiteReturned final 4422 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4423 using Base = 4424 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4425 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4426 : Base(IRP, A) {} 4427 4428 /// See AbstractAttribute::trackStatistics() 4429 void trackStatistics() const override { 4430 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4431 } 4432 }; 4433 } // namespace 4434 4435 // ------------------------ Align Argument Attribute ------------------------ 4436 4437 namespace { 4438 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4439 Value &AssociatedValue, const Use *U, 4440 const Instruction *I, bool &TrackUse) { 4441 // We need to follow common pointer manipulation uses to the accesses they 4442 // feed into. 4443 if (isa<CastInst>(I)) { 4444 // Follow all but ptr2int casts. 4445 TrackUse = !isa<PtrToIntInst>(I); 4446 return 0; 4447 } 4448 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4449 if (GEP->hasAllConstantIndices()) 4450 TrackUse = true; 4451 return 0; 4452 } 4453 4454 MaybeAlign MA; 4455 if (const auto *CB = dyn_cast<CallBase>(I)) { 4456 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4457 return 0; 4458 4459 unsigned ArgNo = CB->getArgOperandNo(U); 4460 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4461 // As long as we only use known information there is no need to track 4462 // dependences here. 4463 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4464 MA = MaybeAlign(AlignAA.getKnownAlign()); 4465 } 4466 4467 const DataLayout &DL = A.getDataLayout(); 4468 const Value *UseV = U->get(); 4469 if (auto *SI = dyn_cast<StoreInst>(I)) { 4470 if (SI->getPointerOperand() == UseV) 4471 MA = SI->getAlign(); 4472 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4473 if (LI->getPointerOperand() == UseV) 4474 MA = LI->getAlign(); 4475 } 4476 4477 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4478 return 0; 4479 4480 unsigned Alignment = MA->value(); 4481 int64_t Offset; 4482 4483 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4484 if (Base == &AssociatedValue) { 4485 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4486 // So we can say that the maximum power of two which is a divisor of 4487 // gcd(Offset, Alignment) is an alignment. 4488 4489 uint32_t gcd = 4490 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4491 Alignment = llvm::PowerOf2Floor(gcd); 4492 } 4493 } 4494 4495 return Alignment; 4496 } 4497 4498 struct AAAlignImpl : AAAlign { 4499 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4500 4501 /// See AbstractAttribute::initialize(...). 4502 void initialize(Attributor &A) override { 4503 SmallVector<Attribute, 4> Attrs; 4504 getAttrs({Attribute::Alignment}, Attrs); 4505 for (const Attribute &Attr : Attrs) 4506 takeKnownMaximum(Attr.getValueAsInt()); 4507 4508 Value &V = getAssociatedValue(); 4509 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4510 4511 if (getIRPosition().isFnInterfaceKind() && 4512 (!getAnchorScope() || 4513 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4514 indicatePessimisticFixpoint(); 4515 return; 4516 } 4517 4518 if (Instruction *CtxI = getCtxI()) 4519 followUsesInMBEC(*this, A, getState(), *CtxI); 4520 } 4521 4522 /// See AbstractAttribute::manifest(...). 
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AssociatedValue = getAssociatedValue();
    for (const Use &U : AssociatedValue.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AssociatedValue)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(Align(getAssumedAlign()));
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AssociatedValue)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check it
  //       in the existing manifest method and in a new one for AAAlignImpl, to
  //       avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AAAlign::StateType &State) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    State.takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};

/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, const Instruction *,
                            AAAlign::StateType &T, bool Stripped) -> bool {
      if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
        return true;
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
                                           DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        int64_t Offset;
        unsigned Alignment = 1;
        if (const Value *Base =
                GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
          // TODO: Use AAAlign for the base too.
          Align PA = Base->getPointerAlignment(DL);
          // BasePointerAddr + Offset = Alignment * Q for some integer Q.
          // So we can say that the maximum power of two which is a divisor of
          // gcd(Offset, Alignment) is an alignment.
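          // E.g., for Offset = 12 and a base alignment of 8, gcd(12, 8) = 4,
          // so the derived pointer is only known to be 4-byte aligned.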

          uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
                                               uint32_t(PA.value()));
          Alignment = llvm::PowerOf2Floor(gcd);
        } else {
          Alignment = V.getPointerAlignment(DL).value();
        }
        // Use only IR information if we did not strip anything.
        T.takeKnownMaximum(Alignment);
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS = AA.getState();
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI(),
                                          UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, and thus none are
    //       assumed dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};

/// Align attribute for function return value.
struct AAAlignReturned final
    : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
  using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};

/// Align attribute for function argument.
struct AAAlignArgument final
    : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. Just does not seem worth the trouble right now.
    if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
      return ChangeStatus::UNCHANGED;
    return Base::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
};

struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAAlignFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. Just does not seem worth the trouble right now.
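    // E.g., for `musttail call void @g(i8* %p)` the caller and callee
    // prototypes (including parameter attributes) have to stay compatible, so
    // raising the alignment on only one side is not safe.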
    if (Argument *Arg = getAssociatedArgument())
      if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
        return ChangeStatus::UNCHANGED;
    ChangeStatus Changed = AAAlignImpl::manifest(A);
    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      Changed = ChangeStatus::UNCHANGED;
    return Changed;
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = AAAlignFloating::updateImpl(A);
    if (Argument *Arg = getAssociatedArgument()) {
      // We only take known information from the argument
      // so we do not need to track a dependence.
      const auto &ArgAlignAA = A.getAAFor<AAAlign>(
          *this, IRPosition::argument(*Arg), DepClassTy::NONE);
      takeKnownMaximum(ArgAlignAA.getKnownAlign());
    }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};

/// Align attribute deduction for a call site return value.
struct AAAlignCallSiteReturned final
    : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};
} // namespace

/// ------------------ Function No-Return Attribute ----------------------------
namespace {
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturn::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret},
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for call sites.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturnImpl::initialize(A);
    if (Function *F = getAssociatedFunction()) {
      const IRPosition &FnPos = IRPosition::function(*F);
      auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
      if (!FnAA.isAssumedNoReturn())
        indicatePessimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};
} // namespace

/// ----------------------- Variable Capturing ---------------------------------

namespace {
/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F =
        isArgumentPosition() ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isArgumentPosition()) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory; it
    // can however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getCalleeArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }

  /// Check the use \p U and update \p State accordingly. Return true if we
  /// should continue to update the state.
  bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
                bool &Follow) {
    Instruction *UInst = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
                      << *UInst << "\n");

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For stores we already checked if we can follow them, if they make it
    // here we give up.
    if (isa<StoreInst>(UInst))
      return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
                          /* Return */ false);

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      if (UInst->getFunction() == getAnchorScope())
        return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
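    //
    // For example (a sketch): for a use like
    //   call void @g(ptr %p)
    // we query AANoCapture for the matching argument of @g below; if that
    // argument is assumed nocapture, this use does not capture the pointer
    // either.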
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(&U))
      return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(&U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA =
        A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      Follow = true;
      return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we
    // don't.
    return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return true if we should continue updating the
  /// state.
  static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
                           bool CapturedInInt, bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }
};

ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
                                  : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);

  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  bool IsKnown;
  if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (IsKnown)
      addKnownBits(NOT_CAPTURED_IN_MEM);
  }

  // Make sure all returned values are different than the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through
  //       returns directly somehow.
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA =
      A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  };

  auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
    switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
    case UseCaptureKind::NO_CAPTURE:
      return true;
    case UseCaptureKind::MAY_CAPTURE:
      return checkUse(A, T, U, Follow);
    case UseCaptureKind::PASSTHROUGH:
      Follow = true;
      return true;
    }
    llvm_unreachable("Unexpected use capture kind!");
  };

  if (!A.checkForAllUses(UseCheck, *this, *V))
    return indicatePessimisticFixpoint();

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (Argument *Arg = getAssociatedArgument())
      if (Arg->hasByValAttr())
        indicateOptimisticFixpoint();
    AANoCaptureImpl::initialize(A);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    // Check what state the associated function can actually capture.
    determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};
} // namespace

/// ------------------ Value Simplify Attribute ----------------------------

bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add typecast support.
  SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
      SimplifiedAssociatedValue, Other, Ty);
  if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
    return false;

  LLVM_DEBUG({
    if (SimplifiedAssociatedValue.hasValue())
      dbgs() << "[ValueSimplify] is assumed to be "
             << **SimplifiedAssociatedValue << "\n";
    else
      dbgs() << "[ValueSimplify] is assumed to be <none>\n";
  });
  return true;
}

namespace {
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
      : AAValueSimplify(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    LLVM_DEBUG({
      errs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
      if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
        errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
    });
    return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
                          : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    return SimplifiedAssociatedValue;
  }

  /// Return a value we can use as replacement for the associated one, or
  /// nullptr if we don't have one that makes sense.
  Value *getReplacementValue(Attributor &A) const {
    Value *NewV;
    NewV = SimplifiedAssociatedValue.hasValue()
               ? SimplifiedAssociatedValue.getValue()
               : UndefValue::get(getAssociatedType());
    if (!NewV)
      return nullptr;
    NewV = AA::getWithType(*NewV, *getAssociatedType());
    if (!NewV || NewV == &getAssociatedValue())
      return nullptr;
    const Instruction *CtxI = getCtxI();
    if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
      return nullptr;
    if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
      return nullptr;
    return NewV;
  }

  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified =
          A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Returns true if a candidate was found, false otherwise.
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);

    if (!COpt.hasValue()) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = COpt.getValue()) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialValues>(A))
      return true;
    return false;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (getAssociatedValue().user_empty())
      return Changed;

    if (auto *NewV = getReplacementValue(A)) {
      LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
                        << *NewV << " :: " << *this << "\n");
      if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    SimplifiedAssociatedValue = &getAssociatedValue();
    return AAValueSimplify::indicatePessimisticFixpoint();
  }

  static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
                         LoadInst &L, function_ref<bool(Value &)> Union) {
    auto UnionWrapper = [&](Value &V, Value &Obj) {
      if (isa<AllocaInst>(Obj))
        return Union(V);
      if (!AA::isDynamicallyUnique(A, AA, V))
        return false;
      if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
        return false;
      return Union(V);
    };

    Value &Ptr = *L.getPointerOperand();
    SmallVector<Value *, 8> Objects;
    bool UsedAssumedInformation = false;
    if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L,
                                         UsedAssumedInformation))
      return false;

    const auto *TLI =
        A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
    for (Value *Obj : Objects) {
      LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
      if (isa<UndefValue>(Obj))
        continue;
      if (isa<ConstantPointerNull>(Obj)) {
        // A null pointer access can be undefined but any offset from null may
        // be OK. We do not try to optimize the latter.
        if (!NullPointerIsDefined(L.getFunction(),
                                  Ptr.getType()->getPointerAddressSpace()) &&
            A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
          continue;
        return false;
      }
      Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
      if (!InitialVal || !Union(*InitialVal))
        return false;

      LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
                           "propagation, checking accesses next.\n");

      auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
        LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        Value *Content = Acc.getWrittenValue();
        if (!Content)
          return false;
        Value *CastedContent =
            AA::getWithType(*Content, *AA.getAssociatedType());
        if (!CastedContent)
          return false;
        if (IsExact)
          return UnionWrapper(*CastedContent, *Obj);
        if (auto *C = dyn_cast<Constant>(CastedContent))
          if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
            return UnionWrapper(*CastedContent, *Obj);
        return false;
      };

      auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
                                           DepClassTy::REQUIRED);
      if (!PI.forallInterferingAccesses(A, AA, L, CheckAccess))
        return false;
    }
    return true;
  }
};

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAnchorScope() || getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
                 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
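    //
    // A sketch of the problematic case (hypothetical IR):
    //   define void @f(ptr byval(i32) %b) { store i32 1, ptr %b ... }
    // The callee writes to its private copy, so replacing uses of %b with
    // the value passed at the call sites would miss that write.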
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      bool IsKnown;
      if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
        return indicatePessimisticFixpoint();
    }

    auto Before = SimplifiedAssociatedValue;

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified
      // values in other functions, e.g., we don't want to say an argument in
      // a static function is actually an argument in a different function.
      bool UsedAssumedInformation = false;
      Optional<Constant *> SimpleArgOp =
          A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
      if (!SimpleArgOp.hasValue())
        return true;
      if (!SimpleArgOp.getValue())
        return false;
      if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
        return false;
      return unionAssumed(*SimpleArgOp);
    };

    // Generate an answer specific to a call site context.
    bool Success;
    bool UsedAssumedInformation = false;
    if (hasCallBaseContext() &&
        getCallBaseContext()->getCalledFunction() == Arg->getParent())
      Success = PredForCallSite(
          AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
    else
      Success = A.checkForAllCallSites(PredForCallSite, *this, true,
                                       UsedAssumedInformation);

    if (!Success)
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
};

struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!isValidState())
      return nullptr;
    return SimplifiedAssociatedValue;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto ReturnInstCB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      return checkAndUpdate(
          A, *this,
          IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                   UsedAssumedInformation))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  ChangeStatus manifest(Attributor &A) override {
    // We queried AAValueSimplify for the returned values so they will be
    // replaced if a simplified form was found. Nothing to do here.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFloating : AAValueSimplifyImpl {
  AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    Value &V = getAnchorValue();

    // TODO: Add other cases.
    if (isa<Constant>(V))
      indicatePessimisticFixpoint();
  }

  /// Check if \p Cmp is a comparison we can simplify.
  ///
  /// We handle multiple cases, one in which at least one operand is an
  /// (assumed) nullptr. If so, try to simplify it using AANonNull on the
  /// other operand. Return true if successful, in that case
  /// SimplifiedAssociatedValue will be updated.
  bool handleCmp(Attributor &A, CmpInst &Cmp) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, V.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };

    Value *LHS = Cmp.getOperand(0);
    Value *RHS = Cmp.getOperand(1);

    // Simplify the operands first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS =
        A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedLHS.hasValue())
      return true;
    if (!SimplifiedLHS.getValue())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS =
        A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedRHS.hasValue())
      return true;
    if (!SimplifiedRHS.getValue())
      return false;
    RHS = *SimplifiedRHS;

    LLVMContext &Ctx = Cmp.getContext();
    // Handle the trivial case first in which we don't even need to think
    // about null or non-null.
    if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
      Constant *NewVal =
          ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
      if (!Union(*NewVal))
        return false;
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return true;
    }

    // From now on we only handle equalities (==, !=).
    ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
    if (!ICmp || !ICmp->isEquality())
      return false;

    bool LHSIsNull = isa<ConstantPointerNull>(LHS);
    bool RHSIsNull = isa<ConstantPointerNull>(RHS);
    if (!LHSIsNull && !RHSIsNull)
      return false;

    // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
    // non-nullptr operand and if we assume it's non-null we can conclude the
    // result of the comparison.
    assert((LHSIsNull || RHSIsNull) &&
           "Expected nullptr versus non-nullptr comparison at this point");

    // The index is the operand that we assume is not null.
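    //
    // For example (sketch): given
    //   %c = icmp eq ptr %p, null
    // with %p assumed non-null, %c simplifies to false; the icmp ne variant
    // simplifies to true accordingly.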
    unsigned PtrIdx = LHSIsNull;
    auto &PtrNonNullAA = A.getAAFor<AANonNull>(
        *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
        DepClassTy::REQUIRED);
    if (!PtrNonNullAA.isAssumedNonNull())
      return false;
    UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();

    // The new value depends on the predicate, true for != and false for ==.
    Constant *NewVal = ConstantInt::get(
        Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
    if (!Union(*NewVal))
      return false;

    if (!UsedAssumedInformation)
      indicateOptimisticFixpoint();

    return true;
  }

  bool updateWithLoad(Attributor &A, LoadInst &L) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, L.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };
    return handleLoad(A, *this, L, Union);
  }

  /// Use the generic, non-optimistic InstSimplify functionality if we managed
  /// to simplify any operand of the instruction \p I. Return true if
  /// successful, in that case SimplifiedAssociatedValue will be updated.
  bool handleGenericInst(Attributor &A, Instruction &I) {
    bool SomeSimplified = false;
    bool UsedAssumedInformation = false;

    SmallVector<Value *, 8> NewOps(I.getNumOperands());
    int Idx = 0;
    for (Value *Op : I.operands()) {
      const auto &SimplifiedOp =
          A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
                                 *this, UsedAssumedInformation);
      // If we are not sure about any operand we are not sure about the entire
      // instruction, we'll wait.
      if (!SimplifiedOp.hasValue())
        return true;

      if (SimplifiedOp.getValue())
        NewOps[Idx] = SimplifiedOp.getValue();
      else
        NewOps[Idx] = Op;

      SomeSimplified |= (NewOps[Idx] != Op);
      ++Idx;
    }

    // We won't bother with the InstSimplify interface if we didn't simplify
    // any operand ourselves.
    if (!SomeSimplified)
      return false;

    InformationCache &InfoCache = A.getInfoCache();
    Function *F = I.getFunction();
    const auto *DT =
        InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    OptimizationRemarkEmitter *ORE = nullptr;

    const DataLayout &DL = I.getModule()->getDataLayout();
    SimplifyQuery Q(DL, TLI, DT, AC, &I);
    if (Value *SimplifiedI =
            SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, SimplifiedI, I.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    }
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
                            bool Stripped) -> bool {
      auto &AA = A.getAAFor<AAValueSimplify>(
          *this, IRPosition::value(V, getCallBaseContext()),
          DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {

        if (auto *I = dyn_cast<Instruction>(&V)) {
          if (auto *LI = dyn_cast<LoadInst>(&V))
            if (updateWithLoad(A, *LI))
              return true;
          if (auto *Cmp = dyn_cast<CmpInst>(&V))
            if (handleCmp(A, *Cmp))
              return true;
          if (handleGenericInst(A, *I))
            return true;
        }
        // TODO: Look at the instruction and check recursively.

        LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
                          << "\n");
        return false;
      }
      return checkAndUpdate(A, *this,
                            IRPosition::value(V, getCallBaseContext()));
    };

    bool Dummy = false;
    bool UsedAssumedInformation = false;
    if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
                                     VisitValueCB, getCtxI(),
                                     UsedAssumedInformation,
                                     /* UseValueSimplify */ false))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SimplifiedAssociatedValue = nullptr;
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFunction(IRP, A) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    Function *Fn = getAssociatedFunction();
    if (!Fn) {
      indicatePessimisticFixpoint();
      return;
    }
    for (Argument &Arg : Fn->args()) {
      if (Arg.hasReturnedAttr()) {
        auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
                                                 Arg.getArgNo());
        if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
            checkAndUpdate(A, *this, IRP))
          indicateOptimisticFixpoint();
        else
          indicatePessimisticFixpoint();
        return;
      }
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;
    auto &RetAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);
    auto PredForReturned =
        [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          bool UsedAssumedInformation = false;
          Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
              &RetVal, *cast<CallBase>(getCtxI()), *this,
              UsedAssumedInformation);
          SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
              SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
          return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
        };
    if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (auto *NewV = getReplacementValue(A)) {
      Use &U = cast<CallBase>(&getAnchorValue())
                   ->getArgOperandUse(getCallSiteArgNo());
      if (A.changeUseAfterManifest(U, *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};
} // namespace

/// ----------------------- Heap-To-Stack Conversion ---------------------------
namespace {
struct AAHeapToStackFunction final : public AAHeapToStack {

  struct AllocationInfo {
    /// The call that allocates the memory.
    CallBase *const CB;

    /// The library function id for the allocation.
    LibFunc LibraryFunctionId = NotLibFunc;

    /// The status wrt. a rewrite.
    enum {
      STACK_DUE_TO_USE,
      STACK_DUE_TO_FREE,
      INVALID,
    } Status = STACK_DUE_TO_USE;

    /// Flag to indicate if we encountered a use that might free this
    /// allocation but which is not in the deallocation infos.
    bool HasPotentiallyFreeingUnknownUses = false;

    /// The set of free calls that use this allocation.
    SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
  };

  struct DeallocationInfo {
    /// The call that deallocates the memory.
    CallBase *const CB;

    /// Flag to indicate if we don't know all objects this deallocation might
    /// free.
    bool MightFreeUnknownObjects = false;

    /// The set of allocation calls that are potentially freed.
    SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
  };

  AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
      : AAHeapToStack(IRP, A) {}

  ~AAHeapToStackFunction() {
    // Ensure we call the destructors so we release any memory allocated in
    // the sets.
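    // Note: The info objects are placement-allocated in the Attributor's
    // allocator (see initialize()), so only the destructors run here; the
    // underlying memory is released together with the allocator.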
    for (auto &It : AllocationInfos)
      It.second->~AllocationInfo();
    for (auto &It : DeallocationInfos)
      It.second->~DeallocationInfo();
  }

  void initialize(Attributor &A) override {
    AAHeapToStack::initialize(A);

    const Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

    auto AllocationIdentifierCB = [&](Instruction &I) {
      CallBase *CB = dyn_cast<CallBase>(&I);
      if (!CB)
        return true;
      if (isFreeCall(CB, TLI)) {
        DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
        return true;
      }
      // To do heap to stack, we need to know that the allocation itself is
      // removable once uses are rewritten, and that we can initialize the
      // alloca to the same pattern as the original allocation result.
      if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
        auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
        if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
          AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
          AllocationInfos[CB] = AI;
          if (TLI)
            TLI->getLibFunc(*CB, AI->LibraryFunctionId);
        }
      }
      return true;
    };

    bool UsedAssumedInformation = false;
    bool Success = A.checkForAllCallLikeInstructions(
        AllocationIdentifierCB, *this, UsedAssumedInformation,
        /* CheckBBLivenessOnly */ false,
        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Did not expect the call base visit callback to fail!");

    Attributor::SimplifictionCallbackTy SCB =
        [](const IRPosition &, const AbstractAttribute *,
           bool &) -> Optional<Value *> { return nullptr; };
    for (const auto &It : AllocationInfos)
      A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
                                       SCB);
    for (const auto &It : DeallocationInfos)
      A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
                                       SCB);
  }

  const std::string getAsStr() const override {
    unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
    for (const auto &It : AllocationInfos) {
      if (It.second->Status == AllocationInfo::INVALID)
        ++NumInvalidMallocs;
      else
        ++NumH2SMallocs;
    }
    return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
           std::to_string(NumInvalidMallocs);
  }

  /// See AbstractAttribute::trackStatistics().
  void trackStatistics() const override {
    STATS_DECL(
        MallocCalls, Function,
        "Number of malloc/calloc/aligned_alloc calls converted to allocas");
    for (auto &It : AllocationInfos)
      if (It.second->Status != AllocationInfo::INVALID)
        ++BUILD_STAT_NAME(MallocCalls, Function);
  }

  bool isAssumedHeapToStack(const CallBase &CB) const override {
    if (isValidState())
      if (AllocationInfo *AI =
              AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
        return AI->Status != AllocationInfo::INVALID;
    return false;
  }

  bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
    if (!isValidState())
      return false;

    for (auto &It : AllocationInfos) {
      AllocationInfo &AI = *It.second;
      if (AI.Status == AllocationInfo::INVALID)
        continue;

      if (AI.PotentialFreeCalls.count(&CB))
        return true;
    }

    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

    for (auto &It : AllocationInfos) {
      AllocationInfo &AI = *It.second;
      if (AI.Status == AllocationInfo::INVALID)
        continue;

      for (CallBase *FreeCall : AI.PotentialFreeCalls) {
        LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
        A.deleteAfterManifest(*FreeCall);
        HasChanged = ChangeStatus::CHANGED;
      }

      LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
                        << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        LibFunc IsAllocShared;
        if (TLI->getLibFunc(*AI.CB, IsAllocShared))
          if (IsAllocShared == LibFunc___kmpc_alloc_shared)
            return OR << "Moving globalized variable to the stack.";
        return OR << "Moving memory allocation from the heap to the stack.";
      };
      if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
        A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
      else
        A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);

      const DataLayout &DL = A.getInfoCache().getDL();
      Value *Size;
      Optional<APInt> SizeAPI = getSize(A, *this, AI);
      if (SizeAPI.hasValue()) {
        Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
      } else {
        LLVMContext &Ctx = AI.CB->getContext();
        ObjectSizeOpts Opts;
        ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
        SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
        assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
               cast<ConstantInt>(SizeOffsetPair.second)->isZero());
        Size = SizeOffsetPair.first;
      }

      Align Alignment(1);
      if (MaybeAlign RetAlign = AI.CB->getRetAlign())
        Alignment = max(Alignment, RetAlign);
      if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
        Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
        assert(AlignmentAPI.hasValue() &&
               "Expected an alignment during manifest!");
        Alignment =
            max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
      }

      // TODO: Hoist the alloca towards the function entry.
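      //
      // The rewrite below, in sketch form (hypothetical IR):
      //   %p = call ptr @malloc(i64 32)
      // becomes
      //   %p = alloca i8, i64 32
      // followed by a memset if the allocation initializes its memory (e.g.,
      // calloc), so the stack memory matches the heap contents.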
      unsigned AS = DL.getAllocaAddrSpace();
      Instruction *Alloca = new AllocaInst(Type::getInt8Ty(F->getContext()), AS,
                                           Size, Alignment, "", AI.CB);

      if (Alloca->getType() != AI.CB->getType())
        Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
            Alloca, AI.CB->getType(), "malloc_cast", AI.CB);

      auto *I8Ty = Type::getInt8Ty(F->getContext());
      auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
      assert(InitVal &&
             "Must be able to materialize initial memory state of allocation");

      A.changeValueAfterManifest(*AI.CB, *Alloca);

      if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
        auto *NBB = II->getNormalDest();
        BranchInst::Create(NBB, AI.CB->getParent());
        A.deleteAfterManifest(*AI.CB);
      } else {
        A.deleteAfterManifest(*AI.CB);
      }

      // Initialize the alloca with the same value as used by the allocation
      // function. We can skip undef as the initial value of an alloca is
      // undef, and the memset would simply end up being DSEd.
      if (!isa<UndefValue>(InitVal)) {
        IRBuilder<> Builder(Alloca->getNextNode());
        // TODO: Use alignment above if align!=1
        Builder.CreateMemSet(Alloca, InitVal, Size, None);
      }
      HasChanged = ChangeStatus::CHANGED;
    }

    return HasChanged;
  }

  Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
                           Value &V) {
    bool UsedAssumedInformation = false;
    Optional<Constant *> SimpleV =
        A.getAssumedConstant(V, AA, UsedAssumedInformation);
    if (!SimpleV.hasValue())
      return APInt(64, 0);
    if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
      return CI->getValue();
    return llvm::None;
  }

  Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
                          AllocationInfo &AI) {
    auto Mapper = [&](const Value *V) -> const Value * {
      bool UsedAssumedInformation = false;
      if (Optional<Constant *> SimpleV =
              A.getAssumedConstant(*V, AA, UsedAssumedInformation))
        if (*SimpleV)
          return *SimpleV;
      return V;
    };

    const Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    return getAllocSize(AI.CB, TLI, Mapper);
  }

  /// Collection of all malloc-like calls in a function with associated
  /// information.
  MapVector<CallBase *, AllocationInfo *> AllocationInfos;

  /// Collection of all free-like calls in a function with associated
  /// information.
  MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;

  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;
  const Function *F = getAnchorScope();
  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

  const auto &LivenessAA =
      A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  bool StackIsAccessibleByOtherThreads =
      A.getInfoCache().stackIsAccessibleByOtherThreads();

  // Flag to ensure we update our deallocation information at most once per
  // updateImpl call and only if we use the free check reasoning.
  bool HasUpdatedFrees = false;

  auto UpdateFrees = [&]() {
    HasUpdatedFrees = true;

    for (auto &It : DeallocationInfos) {
      DeallocationInfo &DI = *It.second;
      // For now we cannot use deallocations that have unknown inputs, skip
      // them.
      if (DI.MightFreeUnknownObjects)
        continue;

      // No need to analyze dead calls, ignore them instead.
      bool UsedAssumedInformation = false;
      if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
                          /* CheckBBLivenessOnly */ true))
        continue;

      // Use the optimistic version to get the freed objects, ignoring dead
      // branches etc.
      SmallVector<Value *, 8> Objects;
      if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
                                           *this, DI.CB,
                                           UsedAssumedInformation)) {
        LLVM_DEBUG(
            dbgs()
            << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
        DI.MightFreeUnknownObjects = true;
        continue;
      }

      // Check each object explicitly.
      for (auto *Obj : Objects) {
        // Free of null and undef can be ignored as no-ops (or UB in the latter
        // case).
        if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
          continue;

        CallBase *ObjCB = dyn_cast<CallBase>(Obj);
        if (!ObjCB) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] Free of a non-call object: " << *Obj << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
        if (!AI) {
          LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
                            << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        DI.PotentialAllocationCalls.insert(ObjCB);
      }
    }
  };

  auto FreeCheck = [&](AllocationInfo &AI) {
    // If the stack is not accessible by other threads, the "must-free" logic
    // doesn't apply as the pointer could be shared and needs to be placed in
    // "shareable" memory.
    if (!StackIsAccessibleByOtherThreads) {
      auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
      if (!NoSyncAA.isAssumedNoSync()) {
        LLVM_DEBUG(
            dbgs() << "[H2S] found an escaping use, stack is not accessible by "
                      "other threads and function is not nosync:\n");
        return false;
      }
    }
    if (!HasUpdatedFrees)
      UpdateFrees();

    // TODO: Allow multi exit functions that have different free calls.
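    //
    // The pattern accepted below, in sketch form (hypothetical IR):
    //   %p = call ptr @malloc(i64 8)
    //   ...
    //   call void @free(ptr %p)
    // that is, a single known free call that frees only this allocation and
    // is always executed once the allocation happened.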
6204 if (AI.PotentialFreeCalls.size() != 1) { 6205 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " 6206 << AI.PotentialFreeCalls.size() << "\n"); 6207 return false; 6208 } 6209 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); 6210 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); 6211 if (!DI) { 6212 LLVM_DEBUG( 6213 dbgs() << "[H2S] unique free call was not known as deallocation call " 6214 << *UniqueFree << "\n"); 6215 return false; 6216 } 6217 if (DI->MightFreeUnknownObjects) { 6218 LLVM_DEBUG( 6219 dbgs() << "[H2S] unique free call might free unknown allocations\n"); 6220 return false; 6221 } 6222 if (DI->PotentialAllocationCalls.size() > 1) { 6223 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " 6224 << DI->PotentialAllocationCalls.size() 6225 << " different allocations\n"); 6226 return false; 6227 } 6228 if (*DI->PotentialAllocationCalls.begin() != AI.CB) { 6229 LLVM_DEBUG( 6230 dbgs() 6231 << "[H2S] unique free call not known to free this allocation but " 6232 << **DI->PotentialAllocationCalls.begin() << "\n"); 6233 return false; 6234 } 6235 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); 6236 if (!Explorer.findInContextOf(UniqueFree, CtxI)) { 6237 LLVM_DEBUG( 6238 dbgs() 6239 << "[H2S] unique free call might not be executed with the allocation " 6240 << *UniqueFree << "\n"); 6241 return false; 6242 } 6243 return true; 6244 }; 6245 6246 auto UsesCheck = [&](AllocationInfo &AI) { 6247 bool ValidUsesOnly = true; 6248 6249 auto Pred = [&](const Use &U, bool &Follow) -> bool { 6250 Instruction *UserI = cast<Instruction>(U.getUser()); 6251 if (isa<LoadInst>(UserI)) 6252 return true; 6253 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 6254 if (SI->getValueOperand() == U.get()) { 6255 LLVM_DEBUG(dbgs() 6256 << "[H2S] escaping store to memory: " << *UserI << "\n"); 6257 ValidUsesOnly = false; 6258 } else { 6259 // A store into the malloc'ed memory is fine. 6260 } 6261 return true; 6262 } 6263 if (auto *CB = dyn_cast<CallBase>(UserI)) { 6264 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 6265 return true; 6266 if (DeallocationInfos.count(CB)) { 6267 AI.PotentialFreeCalls.insert(CB); 6268 return true; 6269 } 6270 6271 unsigned ArgNo = CB->getArgOperandNo(&U); 6272 6273 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 6274 *this, IRPosition::callsite_argument(*CB, ArgNo), 6275 DepClassTy::OPTIONAL); 6276 6277 // If a call site argument use is nofree, we are fine. 6278 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 6279 *this, IRPosition::callsite_argument(*CB, ArgNo), 6280 DepClassTy::OPTIONAL); 6281 6282 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); 6283 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); 6284 if (MaybeCaptured || 6285 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && 6286 MaybeFreed)) { 6287 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; 6288 6289 // Emit a missed remark if this is missed OpenMP globalization. 6290 auto Remark = [&](OptimizationRemarkMissed ORM) { 6291 return ORM 6292 << "Could not move globalized variable to the stack. " 6293 "Variable is potentially captured in call. 
Mark " 6294 "parameter as `__attribute__((noescape))` to override."; 6295 }; 6296 6297 if (ValidUsesOnly && 6298 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6299 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark); 6300 6301 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 6302 ValidUsesOnly = false; 6303 } 6304 return true; 6305 } 6306 6307 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 6308 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 6309 Follow = true; 6310 return true; 6311 } 6312 // Unknown user for which we can not track uses further (in a way that 6313 // makes sense). 6314 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 6315 ValidUsesOnly = false; 6316 return true; 6317 }; 6318 if (!A.checkForAllUses(Pred, *this, *AI.CB)) 6319 return false; 6320 return ValidUsesOnly; 6321 }; 6322 6323 // The actual update starts here. We look at all allocations and depending on 6324 // their status perform the appropriate check(s). 6325 for (auto &It : AllocationInfos) { 6326 AllocationInfo &AI = *It.second; 6327 if (AI.Status == AllocationInfo::INVALID) 6328 continue; 6329 6330 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 6331 Optional<APInt> APAlign = getAPInt(A, *this, *Align); 6332 if (!APAlign) { 6333 // Can't generate an alloca which respects the required alignment 6334 // on the allocation. 6335 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB 6336 << "\n"); 6337 AI.Status = AllocationInfo::INVALID; 6338 Changed = ChangeStatus::CHANGED; 6339 continue; 6340 } else { 6341 if (APAlign->ugt(llvm::Value::MaximumAlignment) || 6342 !APAlign->isPowerOf2()) { 6343 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign 6344 << "\n"); 6345 AI.Status = AllocationInfo::INVALID; 6346 Changed = ChangeStatus::CHANGED; 6347 continue; 6348 } 6349 } 6350 } 6351 6352 if (MaxHeapToStackSize != -1) { 6353 Optional<APInt> Size = getSize(A, *this, AI); 6354 if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) { 6355 LLVM_DEBUG({ 6356 if (!Size.hasValue()) 6357 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n"; 6358 else 6359 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " 6360 << MaxHeapToStackSize << "\n"; 6361 }); 6362 6363 AI.Status = AllocationInfo::INVALID; 6364 Changed = ChangeStatus::CHANGED; 6365 continue; 6366 } 6367 } 6368 6369 switch (AI.Status) { 6370 case AllocationInfo::STACK_DUE_TO_USE: 6371 if (UsesCheck(AI)) 6372 continue; 6373 AI.Status = AllocationInfo::STACK_DUE_TO_FREE; 6374 LLVM_FALLTHROUGH; 6375 case AllocationInfo::STACK_DUE_TO_FREE: 6376 if (FreeCheck(AI)) 6377 continue; 6378 AI.Status = AllocationInfo::INVALID; 6379 Changed = ChangeStatus::CHANGED; 6380 continue; 6381 case AllocationInfo::INVALID: 6382 llvm_unreachable("Invalid allocations should never reach this point!"); 6383 }; 6384 } 6385 6386 return Changed; 6387 } 6388 } // namespace 6389 6390 /// ----------------------- Privatizable Pointers ------------------------------ 6391 namespace { 6392 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 6393 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 6394 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 6395 6396 ChangeStatus indicatePessimisticFixpoint() override { 6397 AAPrivatizablePtr::indicatePessimisticFixpoint(); 6398 PrivatizableType = nullptr; 6399 return ChangeStatus::CHANGED; 6400 } 6401 6402 /// Identify the type we can chose for a private copy of the underlying 6403 /// argument. 
6404 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6405
6406 /// Return a privatizable type that encloses both T0 and T1.
6407 /// TODO: This is merely a stub for now as we should manage a mapping as well.
6408 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6409 if (!T0.hasValue())
6410 return T1;
6411 if (!T1.hasValue())
6412 return T0;
6413 if (T0 == T1)
6414 return T0;
6415 return nullptr;
6416 }
6417
6418 Optional<Type *> getPrivatizableType() const override {
6419 return PrivatizableType;
6420 }
6421
6422 const std::string getAsStr() const override {
6423 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6424 }
6425
6426 protected:
6427 Optional<Type *> PrivatizableType;
6428 };
6429
6430 // TODO: Do this for call site arguments (probably also other values) as well.
6431
6432 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6433 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6434 : AAPrivatizablePtrImpl(IRP, A) {}
6435
6436 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6437 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6438 // If this is a byval argument and we know all the call sites (so we can
6439 // rewrite them), there is no need to check them explicitly.
6440 bool UsedAssumedInformation = false;
6441 SmallVector<Attribute, 1> Attrs;
6442 getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6443 if (!Attrs.empty() &&
6444 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6445 true, UsedAssumedInformation))
6446 return Attrs[0].getValueAsType();
6447
6448 Optional<Type *> Ty;
6449 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6450
6451 // Make sure the associated call site argument has the same type at all call
6452 // sites and it is an allocation we know is safe to privatize, for now that
6453 // means we only allow alloca instructions.
6454 // TODO: We can additionally analyze the accesses in the callee to create
6455 // the type from that information instead. That is a little more
6456 // involved and will be done in a follow-up patch.
6457 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6458 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6459 // Check if a corresponding argument was found or if it is one not
6460 // associated (which can happen for callback calls).
6461 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6462 return false;
6463
6464 // Check that all call sites agree on a type.
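// For example (illustrative only): if one call site passes an `alloca i32`
// and another an `alloca i64`, combineTypes collapses the two candidates to
// nullptr and privatization is abandoned.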
6465 auto &PrivCSArgAA = 6466 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED); 6467 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType(); 6468 6469 LLVM_DEBUG({ 6470 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: "; 6471 if (CSTy.hasValue() && CSTy.getValue()) 6472 CSTy.getValue()->print(dbgs()); 6473 else if (CSTy.hasValue()) 6474 dbgs() << "<nullptr>"; 6475 else 6476 dbgs() << "<none>"; 6477 }); 6478 6479 Ty = combineTypes(Ty, CSTy); 6480 6481 LLVM_DEBUG({ 6482 dbgs() << " : New Type: "; 6483 if (Ty.hasValue() && Ty.getValue()) 6484 Ty.getValue()->print(dbgs()); 6485 else if (Ty.hasValue()) 6486 dbgs() << "<nullptr>"; 6487 else 6488 dbgs() << "<none>"; 6489 dbgs() << "\n"; 6490 }); 6491 6492 return !Ty.hasValue() || Ty.getValue(); 6493 }; 6494 6495 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6496 UsedAssumedInformation)) 6497 return nullptr; 6498 return Ty; 6499 } 6500 6501 /// See AbstractAttribute::updateImpl(...). 6502 ChangeStatus updateImpl(Attributor &A) override { 6503 PrivatizableType = identifyPrivatizableType(A); 6504 if (!PrivatizableType.hasValue()) 6505 return ChangeStatus::UNCHANGED; 6506 if (!PrivatizableType.getValue()) 6507 return indicatePessimisticFixpoint(); 6508 6509 // The dependence is optional so we don't give up once we give up on the 6510 // alignment. 6511 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 6512 DepClassTy::OPTIONAL); 6513 6514 // Avoid arguments with padding for now. 6515 if (!getIRPosition().hasAttr(Attribute::ByVal) && 6516 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 6517 A.getInfoCache().getDL())) { 6518 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 6519 return indicatePessimisticFixpoint(); 6520 } 6521 6522 // Collect the types that will replace the privatizable type in the function 6523 // signature. 6524 SmallVector<Type *, 16> ReplacementTypes; 6525 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6526 6527 // Verify callee and caller agree on how the promoted argument would be 6528 // passed. 6529 Function &Fn = *getIRPosition().getAnchorScope(); 6530 const auto *TTI = 6531 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 6532 if (!TTI) { 6533 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function " 6534 << Fn.getName() << "\n"); 6535 return indicatePessimisticFixpoint(); 6536 } 6537 6538 auto CallSiteCheck = [&](AbstractCallSite ACS) { 6539 CallBase *CB = ACS.getInstruction(); 6540 return TTI->areTypesABICompatible( 6541 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes); 6542 }; 6543 bool UsedAssumedInformation = false; 6544 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6545 UsedAssumedInformation)) { 6546 LLVM_DEBUG( 6547 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 6548 << Fn.getName() << "\n"); 6549 return indicatePessimisticFixpoint(); 6550 } 6551 6552 // Register a rewrite of the argument. 6553 Argument *Arg = getAssociatedArgument(); 6554 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 6555 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 6556 return indicatePessimisticFixpoint(); 6557 } 6558 6559 unsigned ArgNo = Arg->getArgNo(); 6560 6561 // Helper to check if for the given call site the associated argument is 6562 // passed to a callback where the privatization would be different. 
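// For example (illustrative only, hypothetical callee names): given a broker
// call such as
//   call void @broker(void (i8*)* @callback, i8* %payload)
// the abstract call site edge into @callback must agree with this position on
// the privatizable type of %payload, otherwise the rewrite would be unsound.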
6563 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6564 SmallVector<const Use *, 4> CallbackUses;
6565 AbstractCallSite::getCallbackUses(CB, CallbackUses);
6566 for (const Use *U : CallbackUses) {
6567 AbstractCallSite CBACS(U);
6568 assert(CBACS && CBACS.isCallbackCall());
6569 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6570 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6571
6572 LLVM_DEBUG({
6573 dbgs()
6574 << "[AAPrivatizablePtr] Argument " << *Arg
6575 << " check if it can be privatized in the context of its parent ("
6576 << Arg->getParent()->getName()
6577 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6578 "callback ("
6579 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6580 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6581 << CBACS.getCallArgOperand(CBArg) << " vs "
6582 << CB.getArgOperand(ArgNo) << "\n"
6583 << "[AAPrivatizablePtr] " << CBArg << " : "
6584 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6585 });
6586
6587 if (CBArgNo != int(ArgNo))
6588 continue;
6589 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6590 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6591 if (CBArgPrivAA.isValidState()) {
6592 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6593 if (!CBArgPrivTy.hasValue())
6594 continue;
6595 if (CBArgPrivTy.getValue() == PrivatizableType)
6596 continue;
6597 }
6598
6599 LLVM_DEBUG({
6600 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6601 << " cannot be privatized in the context of its parent ("
6602 << Arg->getParent()->getName()
6603 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6604 "callback ("
6605 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6606 << ").\n[AAPrivatizablePtr] for which the argument "
6607 "privatization is not compatible.\n";
6608 });
6609 return false;
6610 }
6611 }
6612 return true;
6613 };
6614
6615 // Helper to check if for the given call site the associated argument is
6616 // passed to a direct call where the privatization would be different.
6617 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6618 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6619 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6620 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6621 "Expected a direct call operand for callback call operand");
6622
6623 LLVM_DEBUG({
6624 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6625 << " check if it can be privatized in the context of its parent ("
6626 << Arg->getParent()->getName()
6627 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6628 "direct call of ("
6629 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6630 << ").\n";
6631 });
6632
6633 Function *DCCallee = DC->getCalledFunction();
6634 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6635 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6636 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6637 DepClassTy::REQUIRED);
6638 if (DCArgPrivAA.isValidState()) {
6639 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6640 if (!DCArgPrivTy.hasValue())
6641 return true;
6642 if (DCArgPrivTy.getValue() == PrivatizableType)
6643 return true;
6644 }
6645 }
6646
6647 LLVM_DEBUG({
6648 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6649 << " cannot be privatized in the context of its parent ("
6650 << Arg->getParent()->getName()
6651 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6652 "direct call of ("
6653 << ACS.getInstruction()->getCalledFunction()->getName()
6654 << ").\n[AAPrivatizablePtr] for which the argument "
6655 "privatization is not compatible.\n";
6656 });
6657 return false;
6658 };
6659
6660 // Helper to check if the associated argument is used at the given abstract
6661 // call site in a way that is incompatible with the privatization assumed
6662 // here.
6663 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6664 if (ACS.isDirectCall())
6665 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6666 if (ACS.isCallbackCall())
6667 return IsCompatiblePrivArgOfDirectCS(ACS);
6668 return false;
6669 };
6670
6671 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6672 UsedAssumedInformation))
6673 return indicatePessimisticFixpoint();
6674
6675 return ChangeStatus::UNCHANGED;
6676 }
6677
6678 /// Given a type to privatize \p PrivType, collect the constituents (which are
6679 /// used) in \p ReplacementTypes.
6680 static void
6681 identifyReplacementTypes(Type *PrivType,
6682 SmallVectorImpl<Type *> &ReplacementTypes) {
6683 // TODO: For now we expand the privatization type to the fullest which can
6684 // lead to dead arguments that need to be removed later.
6685 assert(PrivType && "Expected privatizable type!");
6686
6687 // Traverse the type, extract constituent types on the outermost level.
6688 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6689 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6690 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6691 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6692 ReplacementTypes.append(PrivArrayType->getNumElements(),
6693 PrivArrayType->getElementType());
6694 } else {
6695 ReplacementTypes.push_back(PrivType);
6696 }
6697 }
6698
6699 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6700 /// The values needed are taken from the arguments of \p F starting at
6701 /// position \p ArgNo.
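/// For illustration (hypothetical type, not from a test): for
///   %pair = type { i32, i64 }
/// two stores are emitted, writing F.getArg(ArgNo) at offset 0 and
/// F.getArg(ArgNo + 1) at offset 8 of \p Base (offsets per a typical 64-bit
/// DataLayout).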
6702 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6703 unsigned ArgNo, Instruction &IP) { 6704 assert(PrivType && "Expected privatizable type!"); 6705 6706 IRBuilder<NoFolder> IRB(&IP); 6707 const DataLayout &DL = F.getParent()->getDataLayout(); 6708 6709 // Traverse the type, build GEPs and stores. 6710 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6711 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6712 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6713 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6714 Value *Ptr = 6715 constructPointer(PointeeTy, PrivType, &Base, 6716 PrivStructLayout->getElementOffset(u), IRB, DL); 6717 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6718 } 6719 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6720 Type *PointeeTy = PrivArrayType->getElementType(); 6721 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6722 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6723 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6724 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6725 u * PointeeTySize, IRB, DL); 6726 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6727 } 6728 } else { 6729 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6730 } 6731 } 6732 6733 /// Extract values from \p Base according to the type \p PrivType at the 6734 /// call position \p ACS. The values are appended to \p ReplacementValues. 6735 void createReplacementValues(Align Alignment, Type *PrivType, 6736 AbstractCallSite ACS, Value *Base, 6737 SmallVectorImpl<Value *> &ReplacementValues) { 6738 assert(Base && "Expected base value!"); 6739 assert(PrivType && "Expected privatizable type!"); 6740 Instruction *IP = ACS.getInstruction(); 6741 6742 IRBuilder<NoFolder> IRB(IP); 6743 const DataLayout &DL = IP->getModule()->getDataLayout(); 6744 6745 Type *PrivPtrType = PrivType->getPointerTo(); 6746 if (Base->getType() != PrivPtrType) 6747 Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6748 Base, PrivPtrType, "", ACS.getInstruction()); 6749 6750 // Traverse the type, build GEPs and loads. 6751 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6752 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6753 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6754 Type *PointeeTy = PrivStructType->getElementType(u); 6755 Value *Ptr = 6756 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6757 PrivStructLayout->getElementOffset(u), IRB, DL); 6758 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6759 L->setAlignment(Alignment); 6760 ReplacementValues.push_back(L); 6761 } 6762 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6763 Type *PointeeTy = PrivArrayType->getElementType(); 6764 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6765 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6766 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6767 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6768 u * PointeeTySize, IRB, DL); 6769 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6770 L->setAlignment(Alignment); 6771 ReplacementValues.push_back(L); 6772 } 6773 } else { 6774 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6775 L->setAlignment(Alignment); 6776 ReplacementValues.push_back(L); 6777 } 6778 } 6779 6780 /// See AbstractAttribute::manifest(...) 
6781 ChangeStatus manifest(Attributor &A) override { 6782 if (!PrivatizableType.hasValue()) 6783 return ChangeStatus::UNCHANGED; 6784 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6785 6786 // Collect all tail calls in the function as we cannot allow new allocas to 6787 // escape into tail recursion. 6788 // TODO: Be smarter about new allocas escaping into tail calls. 6789 SmallVector<CallInst *, 16> TailCalls; 6790 bool UsedAssumedInformation = false; 6791 if (!A.checkForAllInstructions( 6792 [&](Instruction &I) { 6793 CallInst &CI = cast<CallInst>(I); 6794 if (CI.isTailCall()) 6795 TailCalls.push_back(&CI); 6796 return true; 6797 }, 6798 *this, {Instruction::Call}, UsedAssumedInformation)) 6799 return ChangeStatus::UNCHANGED; 6800 6801 Argument *Arg = getAssociatedArgument(); 6802 // Query AAAlign attribute for alignment of associated argument to 6803 // determine the best alignment of loads. 6804 const auto &AlignAA = 6805 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6806 6807 // Callback to repair the associated function. A new alloca is placed at the 6808 // beginning and initialized with the values passed through arguments. The 6809 // new alloca replaces the use of the old pointer argument. 6810 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6811 [=](const Attributor::ArgumentReplacementInfo &ARI, 6812 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6813 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6814 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6815 const DataLayout &DL = IP->getModule()->getDataLayout(); 6816 unsigned AS = DL.getAllocaAddrSpace(); 6817 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS, 6818 Arg->getName() + ".priv", IP); 6819 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6820 ArgIt->getArgNo(), *IP); 6821 6822 if (AI->getType() != Arg->getType()) 6823 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6824 AI, Arg->getType(), "", IP); 6825 Arg->replaceAllUsesWith(AI); 6826 6827 for (CallInst *CI : TailCalls) 6828 CI->setTailCall(false); 6829 }; 6830 6831 // Callback to repair a call site of the associated function. The elements 6832 // of the privatizable type are loaded prior to the call and passed to the 6833 // new function version. 6834 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6835 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6836 AbstractCallSite ACS, 6837 SmallVectorImpl<Value *> &NewArgOperands) { 6838 // When no alignment is specified for the load instruction, 6839 // natural alignment is assumed. 6840 createReplacementValues( 6841 assumeAligned(AlignAA.getAssumedAlign()), 6842 PrivatizableType.getValue(), ACS, 6843 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6844 NewArgOperands); 6845 }; 6846 6847 // Collect the types that will replace the privatizable type in the function 6848 // signature. 6849 SmallVector<Type *, 16> ReplacementTypes; 6850 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6851 6852 // Register a rewrite of the argument. 
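// E.g. (illustrative only, hypothetical names): `void @f(%pair* byval(%pair) %p)`
// becomes `void @f(i32 %p.0, i64 %p.1)`; FnRepairCB rebuilds an alloca in the
// entry block of the new function and ACSRepairCB loads both fields at each
// call site.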
6853 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6854 std::move(FnRepairCB), 6855 std::move(ACSRepairCB))) 6856 return ChangeStatus::CHANGED; 6857 return ChangeStatus::UNCHANGED; 6858 } 6859 6860 /// See AbstractAttribute::trackStatistics() 6861 void trackStatistics() const override { 6862 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6863 } 6864 }; 6865 6866 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6867 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6868 : AAPrivatizablePtrImpl(IRP, A) {} 6869 6870 /// See AbstractAttribute::initialize(...). 6871 virtual void initialize(Attributor &A) override { 6872 // TODO: We can privatize more than arguments. 6873 indicatePessimisticFixpoint(); 6874 } 6875 6876 ChangeStatus updateImpl(Attributor &A) override { 6877 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6878 "updateImpl will not be called"); 6879 } 6880 6881 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6882 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6883 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6884 if (!Obj) { 6885 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6886 return nullptr; 6887 } 6888 6889 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6890 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6891 if (CI->isOne()) 6892 return AI->getAllocatedType(); 6893 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6894 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6895 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6896 if (PrivArgAA.isAssumedPrivatizablePtr()) 6897 return PrivArgAA.getPrivatizableType(); 6898 } 6899 6900 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6901 "alloca nor privatizable argument: " 6902 << *Obj << "!\n"); 6903 return nullptr; 6904 } 6905 6906 /// See AbstractAttribute::trackStatistics() 6907 void trackStatistics() const override { 6908 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6909 } 6910 }; 6911 6912 struct AAPrivatizablePtrCallSiteArgument final 6913 : public AAPrivatizablePtrFloating { 6914 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6915 : AAPrivatizablePtrFloating(IRP, A) {} 6916 6917 /// See AbstractAttribute::initialize(...). 6918 void initialize(Attributor &A) override { 6919 if (getIRPosition().hasAttr(Attribute::ByVal)) 6920 indicateOptimisticFixpoint(); 6921 } 6922 6923 /// See AbstractAttribute::updateImpl(...). 
6924 ChangeStatus updateImpl(Attributor &A) override { 6925 PrivatizableType = identifyPrivatizableType(A); 6926 if (!PrivatizableType.hasValue()) 6927 return ChangeStatus::UNCHANGED; 6928 if (!PrivatizableType.getValue()) 6929 return indicatePessimisticFixpoint(); 6930 6931 const IRPosition &IRP = getIRPosition(); 6932 auto &NoCaptureAA = 6933 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 6934 if (!NoCaptureAA.isAssumedNoCapture()) { 6935 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 6936 return indicatePessimisticFixpoint(); 6937 } 6938 6939 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 6940 if (!NoAliasAA.isAssumedNoAlias()) { 6941 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 6942 return indicatePessimisticFixpoint(); 6943 } 6944 6945 bool IsKnown; 6946 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) { 6947 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 6948 return indicatePessimisticFixpoint(); 6949 } 6950 6951 return ChangeStatus::UNCHANGED; 6952 } 6953 6954 /// See AbstractAttribute::trackStatistics() 6955 void trackStatistics() const override { 6956 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 6957 } 6958 }; 6959 6960 struct AAPrivatizablePtrCallSiteReturned final 6961 : public AAPrivatizablePtrFloating { 6962 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 6963 : AAPrivatizablePtrFloating(IRP, A) {} 6964 6965 /// See AbstractAttribute::initialize(...). 6966 void initialize(Attributor &A) override { 6967 // TODO: We can privatize more than arguments. 6968 indicatePessimisticFixpoint(); 6969 } 6970 6971 /// See AbstractAttribute::trackStatistics() 6972 void trackStatistics() const override { 6973 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 6974 } 6975 }; 6976 6977 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 6978 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 6979 : AAPrivatizablePtrFloating(IRP, A) {} 6980 6981 /// See AbstractAttribute::initialize(...). 6982 void initialize(Attributor &A) override { 6983 // TODO: We can privatize more than arguments. 6984 indicatePessimisticFixpoint(); 6985 } 6986 6987 /// See AbstractAttribute::trackStatistics() 6988 void trackStatistics() const override { 6989 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 6990 } 6991 }; 6992 } // namespace 6993 6994 /// -------------------- Memory Behavior Attributes ---------------------------- 6995 /// Includes read-none, read-only, and write-only. 6996 /// ---------------------------------------------------------------------------- 6997 namespace { 6998 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 6999 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 7000 : AAMemoryBehavior(IRP, A) {} 7001 7002 /// See AbstractAttribute::initialize(...). 7003 void initialize(Attributor &A) override { 7004 intersectAssumedBits(BEST_STATE); 7005 getKnownStateFromValue(getIRPosition(), getState()); 7006 AAMemoryBehavior::initialize(A); 7007 } 7008 7009 /// Return the memory behavior information encoded in the IR for \p IRP. 
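/// For example, `readonly` contributes the known bits NO_WRITES and
/// `readnone` contributes NO_ACCESSES, as the switch below shows.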
7010 static void getKnownStateFromValue(const IRPosition &IRP, 7011 BitIntegerState &State, 7012 bool IgnoreSubsumingPositions = false) { 7013 SmallVector<Attribute, 2> Attrs; 7014 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7015 for (const Attribute &Attr : Attrs) { 7016 switch (Attr.getKindAsEnum()) { 7017 case Attribute::ReadNone: 7018 State.addKnownBits(NO_ACCESSES); 7019 break; 7020 case Attribute::ReadOnly: 7021 State.addKnownBits(NO_WRITES); 7022 break; 7023 case Attribute::WriteOnly: 7024 State.addKnownBits(NO_READS); 7025 break; 7026 default: 7027 llvm_unreachable("Unexpected attribute!"); 7028 } 7029 } 7030 7031 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 7032 if (!I->mayReadFromMemory()) 7033 State.addKnownBits(NO_READS); 7034 if (!I->mayWriteToMemory()) 7035 State.addKnownBits(NO_WRITES); 7036 } 7037 } 7038 7039 /// See AbstractAttribute::getDeducedAttributes(...). 7040 void getDeducedAttributes(LLVMContext &Ctx, 7041 SmallVectorImpl<Attribute> &Attrs) const override { 7042 assert(Attrs.size() == 0); 7043 if (isAssumedReadNone()) 7044 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7045 else if (isAssumedReadOnly()) 7046 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 7047 else if (isAssumedWriteOnly()) 7048 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 7049 assert(Attrs.size() <= 1); 7050 } 7051 7052 /// See AbstractAttribute::manifest(...). 7053 ChangeStatus manifest(Attributor &A) override { 7054 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 7055 return ChangeStatus::UNCHANGED; 7056 7057 const IRPosition &IRP = getIRPosition(); 7058 7059 // Check if we would improve the existing attributes first. 7060 SmallVector<Attribute, 4> DeducedAttrs; 7061 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7062 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7063 return IRP.hasAttr(Attr.getKindAsEnum(), 7064 /* IgnoreSubsumingPositions */ true); 7065 })) 7066 return ChangeStatus::UNCHANGED; 7067 7068 // Clear existing attributes. 7069 IRP.removeAttrs(AttrKinds); 7070 7071 // Use the generic manifest method. 7072 return IRAttribute::manifest(A); 7073 } 7074 7075 /// See AbstractState::getAsStr(). 7076 const std::string getAsStr() const override { 7077 if (isAssumedReadNone()) 7078 return "readnone"; 7079 if (isAssumedReadOnly()) 7080 return "readonly"; 7081 if (isAssumedWriteOnly()) 7082 return "writeonly"; 7083 return "may-read/write"; 7084 } 7085 7086 /// The set of IR attributes AAMemoryBehavior deals with. 7087 static const Attribute::AttrKind AttrKinds[3]; 7088 }; 7089 7090 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7091 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7092 7093 /// Memory behavior attribute for a floating value. 7094 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7095 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7096 : AAMemoryBehaviorImpl(IRP, A) {} 7097 7098 /// See AbstractAttribute::updateImpl(...). 
7099 ChangeStatus updateImpl(Attributor &A) override;
7100
7101 /// See AbstractAttribute::trackStatistics()
7102 void trackStatistics() const override {
7103 if (isAssumedReadNone())
7104 STATS_DECLTRACK_FLOATING_ATTR(readnone)
7105 else if (isAssumedReadOnly())
7106 STATS_DECLTRACK_FLOATING_ATTR(readonly)
7107 else if (isAssumedWriteOnly())
7108 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7109 }
7110
7111 private:
7112 /// Return true if users of \p UserI might access the underlying
7113 /// variable/location described by \p U and should therefore be analyzed.
7114 bool followUsersOfUseIn(Attributor &A, const Use &U,
7115 const Instruction *UserI);
7116
7117 /// Update the state according to the effect of use \p U in \p UserI.
7118 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7119 };
7120
7121 /// Memory behavior attribute for function argument.
7122 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7123 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7124 : AAMemoryBehaviorFloating(IRP, A) {}
7125
7126 /// See AbstractAttribute::initialize(...).
7127 void initialize(Attributor &A) override {
7128 intersectAssumedBits(BEST_STATE);
7129 const IRPosition &IRP = getIRPosition();
7130 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7131 // can query it when we use has/getAttr. That would allow us to reuse the
7132 // initialize of the base class here.
7133 bool HasByVal =
7134 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7135 getKnownStateFromValue(IRP, getState(),
7136 /* IgnoreSubsumingPositions */ HasByVal);
7137
7138 // Initialize the use vector with all direct uses of the associated value.
7139 Argument *Arg = getAssociatedArgument();
7140 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7141 indicatePessimisticFixpoint();
7142 }
7143
7144 ChangeStatus manifest(Attributor &A) override {
7145 // TODO: Pointer arguments are not supported on vectors of pointers yet.
7146 if (!getAssociatedValue().getType()->isPointerTy())
7147 return ChangeStatus::UNCHANGED;
7148
7149 // TODO: From readattrs.ll: "inalloca parameters are always
7150 // considered written"
7151 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7152 removeKnownBits(NO_WRITES);
7153 removeAssumedBits(NO_WRITES);
7154 }
7155 return AAMemoryBehaviorFloating::manifest(A);
7156 }
7157
7158 /// See AbstractAttribute::trackStatistics()
7159 void trackStatistics() const override {
7160 if (isAssumedReadNone())
7161 STATS_DECLTRACK_ARG_ATTR(readnone)
7162 else if (isAssumedReadOnly())
7163 STATS_DECLTRACK_ARG_ATTR(readonly)
7164 else if (isAssumedWriteOnly())
7165 STATS_DECLTRACK_ARG_ATTR(writeonly)
7166 }
7167 };
7168
7169 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7170 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7171 : AAMemoryBehaviorArgument(IRP, A) {}
7172
7173 /// See AbstractAttribute::initialize(...).
7174 void initialize(Attributor &A) override {
7175 // If we don't have an associated argument this is either a variadic call
7176 // or an indirect call, either way, nothing to do here.
7177 Argument *Arg = getAssociatedArgument();
7178 if (!Arg) {
7179 indicatePessimisticFixpoint();
7180 return;
7181 }
7182 if (Arg->hasByValAttr()) {
7183 addKnownBits(NO_WRITES);
7184 removeKnownBits(NO_READS);
7185 removeAssumedBits(NO_READS);
7186 }
7187 AAMemoryBehaviorArgument::initialize(A);
7188 if (getAssociatedFunction()->isDeclaration())
7189 indicatePessimisticFixpoint();
7190 }
7191
7192 /// See AbstractAttribute::updateImpl(...).
7193 ChangeStatus updateImpl(Attributor &A) override {
7194 // TODO: Once we have call site specific value information we can provide
7195 // call site specific liveness information and then it makes
7196 // sense to specialize attributes for call site arguments instead of
7197 // redirecting requests to the callee argument.
7198 Argument *Arg = getAssociatedArgument();
7199 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7200 auto &ArgAA =
7201 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7202 return clampStateAndIndicateChange(getState(), ArgAA.getState());
7203 }
7204
7205 /// See AbstractAttribute::trackStatistics()
7206 void trackStatistics() const override {
7207 if (isAssumedReadNone())
7208 STATS_DECLTRACK_CSARG_ATTR(readnone)
7209 else if (isAssumedReadOnly())
7210 STATS_DECLTRACK_CSARG_ATTR(readonly)
7211 else if (isAssumedWriteOnly())
7212 STATS_DECLTRACK_CSARG_ATTR(writeonly)
7213 }
7214 };
7215
7216 /// Memory behavior attribute for a call site return position.
7217 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7218 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7219 : AAMemoryBehaviorFloating(IRP, A) {}
7220
7221 /// See AbstractAttribute::initialize(...).
7222 void initialize(Attributor &A) override {
7223 AAMemoryBehaviorImpl::initialize(A);
7224 Function *F = getAssociatedFunction();
7225 if (!F || F->isDeclaration())
7226 indicatePessimisticFixpoint();
7227 }
7228
7229 /// See AbstractAttribute::manifest(...).
7230 ChangeStatus manifest(Attributor &A) override {
7231 // We do not annotate returned values.
7232 return ChangeStatus::UNCHANGED;
7233 }
7234
7235 /// See AbstractAttribute::trackStatistics()
7236 void trackStatistics() const override {}
7237 };
7238
7239 /// An AA to represent the memory behavior function attributes.
7240 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7241 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7242 : AAMemoryBehaviorImpl(IRP, A) {}
7243
7244 /// See AbstractAttribute::updateImpl(Attributor &A).
7245 virtual ChangeStatus updateImpl(Attributor &A) override;
7246
7247 /// See AbstractAttribute::manifest(...).
7248 ChangeStatus manifest(Attributor &A) override {
7249 Function &F = cast<Function>(getAnchorValue());
7250 if (isAssumedReadNone()) {
7251 F.removeFnAttr(Attribute::ArgMemOnly);
7252 F.removeFnAttr(Attribute::InaccessibleMemOnly);
7253 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7254 }
7255 return AAMemoryBehaviorImpl::manifest(A);
7256 }
7257
7258 /// See AbstractAttribute::trackStatistics()
7259 void trackStatistics() const override {
7260 if (isAssumedReadNone())
7261 STATS_DECLTRACK_FN_ATTR(readnone)
7262 else if (isAssumedReadOnly())
7263 STATS_DECLTRACK_FN_ATTR(readonly)
7264 else if (isAssumedWriteOnly())
7265 STATS_DECLTRACK_FN_ATTR(writeonly)
7266 }
7267 };
7268
7269 /// AAMemoryBehavior attribute for call sites.
7270 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7271 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7272 : AAMemoryBehaviorImpl(IRP, A) {}
7273
7274 /// See AbstractAttribute::initialize(...).
7275 void initialize(Attributor &A) override {
7276 AAMemoryBehaviorImpl::initialize(A);
7277 Function *F = getAssociatedFunction();
7278 if (!F || F->isDeclaration())
7279 indicatePessimisticFixpoint();
7280 }
7281
7282 /// See AbstractAttribute::updateImpl(...).
7283 ChangeStatus updateImpl(Attributor &A) override {
7284 // TODO: Once we have call site specific value information we can provide
7285 // call site specific liveness information and then it makes
7286 // sense to specialize attributes for call site arguments instead of
7287 // redirecting requests to the callee argument.
7288 Function *F = getAssociatedFunction();
7289 const IRPosition &FnPos = IRPosition::function(*F);
7290 auto &FnAA =
7291 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7292 return clampStateAndIndicateChange(getState(), FnAA.getState());
7293 }
7294
7295 /// See AbstractAttribute::trackStatistics()
7296 void trackStatistics() const override {
7297 if (isAssumedReadNone())
7298 STATS_DECLTRACK_CS_ATTR(readnone)
7299 else if (isAssumedReadOnly())
7300 STATS_DECLTRACK_CS_ATTR(readonly)
7301 else if (isAssumedWriteOnly())
7302 STATS_DECLTRACK_CS_ATTR(writeonly)
7303 }
7304 };
7305
7306 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7307
7308 // The current assumed state used to determine a change.
7309 auto AssumedState = getAssumed();
7310
7311 auto CheckRWInst = [&](Instruction &I) {
7312 // If the instruction has its own memory behavior state, use it to restrict
7313 // the local state. No further analysis is required as the other memory
7314 // state is as optimistic as it gets.
7315 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7316 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7317 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7318 intersectAssumedBits(MemBehaviorAA.getAssumed());
7319 return !isAtFixpoint();
7320 }
7321
7322 // Remove access kind modifiers if necessary.
7323 if (I.mayReadFromMemory())
7324 removeAssumedBits(NO_READS);
7325 if (I.mayWriteToMemory())
7326 removeAssumedBits(NO_WRITES);
7327 return !isAtFixpoint();
7328 };
7329
7330 bool UsedAssumedInformation = false;
7331 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7332 UsedAssumedInformation))
7333 return indicatePessimisticFixpoint();
7334
7335 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7336 : ChangeStatus::UNCHANGED;
7337 }
7338
7339 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7340
7341 const IRPosition &IRP = getIRPosition();
7342 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7343 AAMemoryBehavior::StateType &S = getState();
7344
7345 // First, check the function scope. We take the known information and we avoid
7346 // work if the assumed information implies the current assumed information for
7347 // this attribute. This is valid for all but byval arguments.
7348 Argument *Arg = IRP.getAssociatedArgument();
7349 AAMemoryBehavior::base_t FnMemAssumedState =
7350 AAMemoryBehavior::StateType::getWorstState();
7351 if (!Arg || !Arg->hasByValAttr()) {
7352 const auto &FnMemAA =
7353 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7354 FnMemAssumedState = FnMemAA.getAssumed();
7355 S.addKnownBits(FnMemAA.getKnown());
7356 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7357 return ChangeStatus::UNCHANGED;
7358 }
7359
7360 // The current assumed state used to determine a change.
7361 auto AssumedState = S.getAssumed();
7362
7363 // Make sure the value is not captured (except through "return"), if
7364 // it is, any information derived would be irrelevant anyway as we cannot
7365 // check the potential aliases introduced by the capture. However, no need
7366 // to fall back to anything less optimistic than the function state.
7367 const auto &ArgNoCaptureAA =
7368 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7369 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7370 S.intersectAssumedBits(FnMemAssumedState);
7371 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7372 : ChangeStatus::UNCHANGED;
7373 }
7374
7375 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7376 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7377 Instruction *UserI = cast<Instruction>(U.getUser());
7378 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7379 << " \n");
7380
7381 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7382 if (UserI->isDroppable())
7383 return true;
7384
7385 // Check if the users of UserI should also be visited.
7386 Follow = followUsersOfUseIn(A, U, UserI);
7387
7388 // If UserI might touch memory we analyze the use in detail.
7389 if (UserI->mayReadOrWriteMemory())
7390 analyzeUseIn(A, U, UserI);
7391
7392 return !isAtFixpoint();
7393 };
7394
7395 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7396 return indicatePessimisticFixpoint();
7397
7398 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7399 : ChangeStatus::UNCHANGED;
7400 }
7401
7402 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7403 const Instruction *UserI) {
7404 // The loaded value is unrelated to the pointer argument, no need to
7405 // follow the users of the load.
7406 if (isa<LoadInst>(UserI))
7407 return false;
7408
7409 // By default we follow all uses assuming UserI might leak information on U,
7410 // we have special handling for call site operands though.
7411 const auto *CB = dyn_cast<CallBase>(UserI);
7412 if (!CB || !CB->isArgOperand(&U))
7413 return true;
7414
7415 // If the use is a call argument known not to be captured, the users of
7416 // the call do not need to be visited because they have to be unrelated to
7417 // the input. Note that this check is not trivial even though we disallow
7418 // general capturing of the underlying argument. The reason is that the
7419 // call might capture the argument "through return", which we allow and for
7420 // which we need to check call users.
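// For example (illustrative only): given
//   %q = call i8* @passthrough(i8* %p)
// %p may be captured "through return" into %q, so the users of the call must
// still be visited even if %p is otherwise nocapture.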
7421 if (U.get()->getType()->isPointerTy()) { 7422 unsigned ArgNo = CB->getArgOperandNo(&U); 7423 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 7424 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); 7425 return !ArgNoCaptureAA.isAssumedNoCapture(); 7426 } 7427 7428 return true; 7429 } 7430 7431 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, 7432 const Instruction *UserI) { 7433 assert(UserI->mayReadOrWriteMemory()); 7434 7435 switch (UserI->getOpcode()) { 7436 default: 7437 // TODO: Handle all atomics and other side-effect operations we know of. 7438 break; 7439 case Instruction::Load: 7440 // Loads cause the NO_READS property to disappear. 7441 removeAssumedBits(NO_READS); 7442 return; 7443 7444 case Instruction::Store: 7445 // Stores cause the NO_WRITES property to disappear if the use is the 7446 // pointer operand. Note that while capturing was taken care of somewhere 7447 // else we need to deal with stores of the value that is not looked through. 7448 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) 7449 removeAssumedBits(NO_WRITES); 7450 else 7451 indicatePessimisticFixpoint(); 7452 return; 7453 7454 case Instruction::Call: 7455 case Instruction::CallBr: 7456 case Instruction::Invoke: { 7457 // For call sites we look at the argument memory behavior attribute (this 7458 // could be recursive!) in order to restrict our own state. 7459 const auto *CB = cast<CallBase>(UserI); 7460 7461 // Give up on operand bundles. 7462 if (CB->isBundleOperand(&U)) { 7463 indicatePessimisticFixpoint(); 7464 return; 7465 } 7466 7467 // Calling a function does read the function pointer, maybe write it if the 7468 // function is self-modifying. 7469 if (CB->isCallee(&U)) { 7470 removeAssumedBits(NO_READS); 7471 break; 7472 } 7473 7474 // Adjust the possible access behavior based on the information on the 7475 // argument. 7476 IRPosition Pos; 7477 if (U.get()->getType()->isPointerTy()) 7478 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 7479 else 7480 Pos = IRPosition::callsite_function(*CB); 7481 const auto &MemBehaviorAA = 7482 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); 7483 // "assumed" has at most the same bits as the MemBehaviorAA assumed 7484 // and at least "known". 7485 intersectAssumedBits(MemBehaviorAA.getAssumed()); 7486 return; 7487 } 7488 }; 7489 7490 // Generally, look at the "may-properties" and adjust the assumed state if we 7491 // did not trigger special handling before. 
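// E.g., an `atomicrmw` falls through the switch above and both reads and
// writes its location, so it clears NO_READS as well as NO_WRITES here.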
7492 if (UserI->mayReadFromMemory())
7493 removeAssumedBits(NO_READS);
7494 if (UserI->mayWriteToMemory())
7495 removeAssumedBits(NO_WRITES);
7496 }
7497 } // namespace
7498
7499 /// -------------------- Memory Locations Attributes ---------------------------
7500 /// Includes read-none, argmemonly, inaccessiblememonly,
7501 /// inaccessiblemem_or_argmemonly
7502 /// ----------------------------------------------------------------------------
7503
7504 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7505 AAMemoryLocation::MemoryLocationsKind MLK) {
7506 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7507 return "all memory";
7508 if (MLK == AAMemoryLocation::NO_LOCATIONS)
7509 return "no memory";
7510 std::string S = "memory:";
7511 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7512 S += "stack,";
7513 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7514 S += "constant,";
7515 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7516 S += "internal global,";
7517 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7518 S += "external global,";
7519 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7520 S += "argument,";
7521 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7522 S += "inaccessible,";
7523 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7524 S += "malloced,";
7525 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7526 S += "unknown,";
7527 S.pop_back();
7528 return S;
7529 }
7530
7531 namespace {
7532 struct AAMemoryLocationImpl : public AAMemoryLocation {
7533
7534 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7535 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7536 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7537 AccessKind2Accesses[u] = nullptr;
7538 }
7539
7540 ~AAMemoryLocationImpl() {
7541 // The AccessSets are allocated via a BumpPtrAllocator, we call
7542 // the destructor manually.
7543 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7544 if (AccessKind2Accesses[u])
7545 AccessKind2Accesses[u]->~AccessSet();
7546 }
7547
7548 /// See AbstractAttribute::initialize(...).
7549 void initialize(Attributor &A) override {
7550 intersectAssumedBits(BEST_STATE);
7551 getKnownStateFromValue(A, getIRPosition(), getState());
7552 AAMemoryLocation::initialize(A);
7553 }
7554
7555 /// Return the memory behavior information encoded in the IR for \p IRP.
7556 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7557 BitIntegerState &State,
7558 bool IgnoreSubsumingPositions = false) {
7559 // For internal functions we ignore `argmemonly` and
7560 // `inaccessiblemem_or_argmemonly` as we might break it via interprocedural
7561 // constant propagation. It is unclear if this is the best way but it is
7562 // unlikely this will cause real performance problems. If we are deriving
7563 // attributes for the anchor function we even remove the attribute in
7564 // addition to ignoring it.
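// Illustrative scenario (hypothetical): an internal `argmemonly` callee may
// get a pointer argument replaced by a global `@G` through interprocedural
// constant propagation; its accesses then target global memory and keeping
// the stale `argmemonly` would be wrong.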
7565 bool UseArgMemOnly = true; 7566 Function *AnchorFn = IRP.getAnchorScope(); 7567 if (AnchorFn && A.isRunOn(*AnchorFn)) 7568 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7569 7570 SmallVector<Attribute, 2> Attrs; 7571 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7572 for (const Attribute &Attr : Attrs) { 7573 switch (Attr.getKindAsEnum()) { 7574 case Attribute::ReadNone: 7575 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7576 break; 7577 case Attribute::InaccessibleMemOnly: 7578 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7579 break; 7580 case Attribute::ArgMemOnly: 7581 if (UseArgMemOnly) 7582 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7583 else 7584 IRP.removeAttrs({Attribute::ArgMemOnly}); 7585 break; 7586 case Attribute::InaccessibleMemOrArgMemOnly: 7587 if (UseArgMemOnly) 7588 State.addKnownBits(inverseLocation( 7589 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7590 else 7591 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7592 break; 7593 default: 7594 llvm_unreachable("Unexpected attribute!"); 7595 } 7596 } 7597 } 7598 7599 /// See AbstractAttribute::getDeducedAttributes(...). 7600 void getDeducedAttributes(LLVMContext &Ctx, 7601 SmallVectorImpl<Attribute> &Attrs) const override { 7602 assert(Attrs.size() == 0); 7603 if (isAssumedReadNone()) { 7604 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7605 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7606 if (isAssumedInaccessibleMemOnly()) 7607 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7608 else if (isAssumedArgMemOnly()) 7609 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7610 else if (isAssumedInaccessibleOrArgMemOnly()) 7611 Attrs.push_back( 7612 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7613 } 7614 assert(Attrs.size() <= 1); 7615 } 7616 7617 /// See AbstractAttribute::manifest(...). 7618 ChangeStatus manifest(Attributor &A) override { 7619 const IRPosition &IRP = getIRPosition(); 7620 7621 // Check if we would improve the existing attributes first. 7622 SmallVector<Attribute, 4> DeducedAttrs; 7623 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7624 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7625 return IRP.hasAttr(Attr.getKindAsEnum(), 7626 /* IgnoreSubsumingPositions */ true); 7627 })) 7628 return ChangeStatus::UNCHANGED; 7629 7630 // Clear existing attributes. 7631 IRP.removeAttrs(AttrKinds); 7632 if (isAssumedReadNone()) 7633 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7634 7635 // Use the generic manifest method. 7636 return IRAttribute::manifest(A); 7637 } 7638 7639 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
7640 bool checkForAllAccessesToMemoryKind(
7641 function_ref<bool(const Instruction *, const Value *, AccessKind,
7642 MemoryLocationsKind)>
7643 Pred,
7644 MemoryLocationsKind RequestedMLK) const override {
7645 if (!isValidState())
7646 return false;
7647
7648 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7649 if (AssumedMLK == NO_LOCATIONS)
7650 return true;
7651
7652 unsigned Idx = 0;
7653 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7654 CurMLK *= 2, ++Idx) {
7655 if (CurMLK & RequestedMLK)
7656 continue;
7657
7658 if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7659 for (const AccessInfo &AI : *Accesses)
7660 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7661 return false;
7662 }
7663
7664 return true;
7665 }
7666
7667 ChangeStatus indicatePessimisticFixpoint() override {
7668 // If we give up and indicate a pessimistic fixpoint this instruction will
7669 // become an access for all potential access kinds:
7670 // TODO: Add pointers for argmemonly and globals to improve the results of
7671 // checkForAllAccessesToMemoryKind.
7672 bool Changed = false;
7673 MemoryLocationsKind KnownMLK = getKnown();
7674 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7675 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7676 if (!(CurMLK & KnownMLK))
7677 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7678 getAccessKindFromInst(I));
7679 return AAMemoryLocation::indicatePessimisticFixpoint();
7680 }
7681
7682 protected:
7683 /// Helper struct to tie together an instruction that has a read or write
7684 /// effect with the pointer it accesses (if any).
7685 struct AccessInfo {
7686
7687 /// The instruction that caused the access.
7688 const Instruction *I;
7689
7690 /// The base pointer that is accessed, or null if unknown.
7691 const Value *Ptr;
7692
7693 /// The kind of access (read/write/read+write).
7694 AccessKind Kind;
7695
7696 bool operator==(const AccessInfo &RHS) const {
7697 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7698 }
7699 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7700 if (LHS.I != RHS.I)
7701 return LHS.I < RHS.I;
7702 if (LHS.Ptr != RHS.Ptr)
7703 return LHS.Ptr < RHS.Ptr;
7704 if (LHS.Kind != RHS.Kind)
7705 return LHS.Kind < RHS.Kind;
7706 return false;
7707 }
7708 };
7709
7710 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7711 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7712 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7713 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7714
7715 /// Categorize the pointer arguments of CB that might access memory in
7716 /// AccessedLoc and update the state and access map accordingly.
7717 void
7718 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7719 AAMemoryLocation::StateType &AccessedLocs,
7720 bool &Changed);
7721
7722 /// Return the kind(s) of location that may be accessed by \p I.
7723 AAMemoryLocation::MemoryLocationsKind
7724 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7725
7726 /// Return the access kind as determined by \p I.
7727 AccessKind getAccessKindFromInst(const Instruction *I) {
7728 AccessKind AK = READ_WRITE;
7729 if (I) {
7730 AK = I->mayReadFromMemory() ? READ : NONE;
7731 AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7732 }
7733 return AK;
7734 }
7735
7736 /// Update the state \p State and the AccessKind2Accesses given that \p I is
7737 /// an access of kind \p AK to a \p MLK memory location with the access
7738 /// pointer \p Ptr.
7739 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7740 MemoryLocationsKind MLK, const Instruction *I,
7741 const Value *Ptr, bool &Changed,
7742 AccessKind AK = READ_WRITE) {
7743
7744 assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7745 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7746 if (!Accesses)
7747 Accesses = new (Allocator) AccessSet();
7748 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7749 State.removeAssumedBits(MLK);
7750 }
7751
7752 /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7753 /// arguments, and update the state and access map accordingly.
7754 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7755 AAMemoryLocation::StateType &State, bool &Changed);
7756
7757 /// Used to allocate access sets.
7758 BumpPtrAllocator &Allocator;
7759
7760 /// The set of IR attributes AAMemoryLocation deals with.
7761 static const Attribute::AttrKind AttrKinds[4];
7762 };
7763
7764 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7765 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7766 Attribute::InaccessibleMemOrArgMemOnly};
7767
7768 void AAMemoryLocationImpl::categorizePtrValue(
7769 Attributor &A, const Instruction &I, const Value &Ptr,
7770 AAMemoryLocation::StateType &State, bool &Changed) {
7771 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7772 << Ptr << " ["
7773 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7774
7775 SmallVector<Value *, 8> Objects;
7776 bool UsedAssumedInformation = false;
7777 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7778 UsedAssumedInformation,
7779 /* Intraprocedural */ true)) {
7780 LLVM_DEBUG(
7781 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7782 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7783 getAccessKindFromInst(&I));
7784 return;
7785 }
7786
7787 for (Value *Obj : Objects) {
7788 // TODO: recognize the TBAA used for constant accesses.
7789 MemoryLocationsKind MLK = NO_LOCATIONS;
7790 if (isa<UndefValue>(Obj))
7791 continue;
7792 if (isa<Argument>(Obj)) {
7793 // TODO: For now we do not treat byval arguments as local copies performed
7794 // on the call edge, though, we should. To make that happen we need to
7795 // teach various passes, e.g., DSE, about the copy effect of a byval. That
7796 // would also allow us to mark functions only accessing byval arguments as
7797 // readnone again, arguably their accesses have no effect outside of the
7798 // function, like accesses to allocas.
7799 MLK = NO_ARGUMENT_MEM;
7800 } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7801 // Reading constant memory is not treated as a read "effect" by the
7802 // function attr pass so we won't either. Constants defined by TBAA are
7803 // similar. (We know we do not write it because it is constant.)
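// E.g. (illustrative only): loads from
//   @str = private unnamed_addr constant [4 x i8] c"abc\00"
// are skipped by the check below and record no accessed location.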
7804       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7805         if (GVar->isConstant())
7806           continue;
7807
7808       if (GV->hasLocalLinkage())
7809         MLK = NO_GLOBAL_INTERNAL_MEM;
7810       else
7811         MLK = NO_GLOBAL_EXTERNAL_MEM;
7812     } else if (isa<ConstantPointerNull>(Obj) &&
7813                !NullPointerIsDefined(getAssociatedFunction(),
7814                                      Ptr.getType()->getPointerAddressSpace())) {
7815       continue;
7816     } else if (isa<AllocaInst>(Obj)) {
7817       MLK = NO_LOCAL_MEM;
7818     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7819       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7820           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7821       if (NoAliasAA.isAssumedNoAlias())
7822         MLK = NO_MALLOCED_MEM;
7823       else
7824         MLK = NO_UNKOWN_MEM;
7825     } else {
7826       MLK = NO_UNKOWN_MEM;
7827     }
7828
7829     assert(MLK != NO_LOCATIONS && "No location specified!");
7830     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7831                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7832                       << "\n");
7833     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7834                               getAccessKindFromInst(&I));
7835   }
7836
7837   LLVM_DEBUG(
7838       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7839              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7840 }
7841
7842 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7843     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7844     bool &Changed) {
7845   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7846
7847     // Skip non-pointer arguments.
7848     const Value *ArgOp = CB.getArgOperand(ArgNo);
7849     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7850       continue;
7851
7852     // Skip readnone arguments.
7853     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7854     const auto &ArgOpMemLocationAA =
7855         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7856
7857     if (ArgOpMemLocationAA.isAssumedReadNone())
7858       continue;
7859
7860     // Categorize potentially accessed pointer arguments as if there were an
7861     // access instruction with them as pointer.
7862     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7863   }
7864 }
7865
7866 AAMemoryLocation::MemoryLocationsKind
7867 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7868                                                   bool &Changed) {
7869   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7870                     << I << "\n");
7871
7872   AAMemoryLocation::StateType AccessedLocs;
7873   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7874
7875   if (auto *CB = dyn_cast<CallBase>(&I)) {
7876
7877     // First check whether we assume any accessed memory is visible.
7878     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7879         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7880     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7881                       << " [" << CBMemLocationAA << "]\n");
7882
7883     if (CBMemLocationAA.isAssumedReadNone())
7884       return NO_LOCATIONS;
7885
7886     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7887       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7888                                 Changed, getAccessKindFromInst(&I));
7889       return AccessedLocs.getAssumed();
7890     }
7891
7892     uint32_t CBAssumedNotAccessedLocs =
7893         CBMemLocationAA.getAssumedNotAccessedLocation();
7894
7895     // Set the argmemonly and global bit as we handle them separately below.
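    // A minimal worked example of the masking below (the concrete bit
    // values are hypothetical): if all we know about the callee is that it
    // does not access local memory, then
    //   CBAssumedNotAccessedLocs         == NO_LOCAL_MEM
    //   CBAssumedNotAccessedLocsNoArgMem == NO_LOCAL_MEM | NO_ARGUMENT_MEM |
    //                                       NO_GLOBAL_MEM
    // so the generic loop that follows only records accesses for the
    // remaining kinds, while argument and global memory get the dedicated
    // handling further down.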
7896     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7897         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7898
7899     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7900       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7901         continue;
7902       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7903                                 getAccessKindFromInst(&I));
7904     }
7905
7906     // Now handle global memory if it might be accessed. This is slightly tricky
7907     // as NO_GLOBAL_MEM has multiple bits set.
7908     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7909     if (HasGlobalAccesses) {
7910       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7911                             AccessKind Kind, MemoryLocationsKind MLK) {
7912         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7913                                   getAccessKindFromInst(&I));
7914         return true;
7915       };
7916       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7917               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7918         return AccessedLocs.getWorstState();
7919     }
7920
7921     LLVM_DEBUG(
7922         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7923                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7924
7925     // Now handle argument memory if it might be accessed.
7926     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7927     if (HasArgAccesses)
7928       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7929
7930     LLVM_DEBUG(
7931         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7932                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7933
7934     return AccessedLocs.getAssumed();
7935   }
7936
7937   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7938     LLVM_DEBUG(
7939         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7940                << I << " [" << *Ptr << "]\n");
7941     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7942     return AccessedLocs.getAssumed();
7943   }
7944
7945   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7946                     << I << "\n");
7947   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7948                             getAccessKindFromInst(&I));
7949   return AccessedLocs.getAssumed();
7950 }
7951
7952 /// An AA to represent the memory location function attributes.
7953 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7954   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7955       : AAMemoryLocationImpl(IRP, A) {}
7956
7957   /// See AbstractAttribute::updateImpl(Attributor &A).
7958   virtual ChangeStatus updateImpl(Attributor &A) override {
7959
7960     const auto &MemBehaviorAA =
7961         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7962     if (MemBehaviorAA.isAssumedReadNone()) {
7963       if (MemBehaviorAA.isKnownReadNone())
7964         return indicateOptimisticFixpoint();
7965       assert(isAssumedReadNone() &&
7966              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7967       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7968       return ChangeStatus::UNCHANGED;
7969     }
7970
7971     // The current assumed state used to determine a change.
7972     auto AssumedState = getAssumed();
7973     bool Changed = false;
7974
7975     auto CheckRWInst = [&](Instruction &I) {
7976       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7977       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7978                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7979       removeAssumedBits(inverseLocation(MLK, false, false));
7980       // Stop once only the valid bit is set in the *not assumed location*, thus
7981       // once we don't actually exclude any memory locations in the state.
7982       return getAssumedNotAccessedLocation() != VALID_STATE;
7983     };
7984
7985     bool UsedAssumedInformation = false;
7986     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7987                                             UsedAssumedInformation))
7988       return indicatePessimisticFixpoint();
7989
7990     Changed |= AssumedState != getAssumed();
7991     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7992   }
7993
7994   /// See AbstractAttribute::trackStatistics()
7995   void trackStatistics() const override {
7996     if (isAssumedReadNone())
7997       STATS_DECLTRACK_FN_ATTR(readnone)
7998     else if (isAssumedArgMemOnly())
7999       STATS_DECLTRACK_FN_ATTR(argmemonly)
8000     else if (isAssumedInaccessibleMemOnly())
8001       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
8002     else if (isAssumedInaccessibleOrArgMemOnly())
8003       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
8004   }
8005 };
8006
8007 /// AAMemoryLocation attribute for call sites.
8008 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
8009   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
8010       : AAMemoryLocationImpl(IRP, A) {}
8011
8012   /// See AbstractAttribute::initialize(...).
8013   void initialize(Attributor &A) override {
8014     AAMemoryLocationImpl::initialize(A);
8015     Function *F = getAssociatedFunction();
8016     if (!F || F->isDeclaration())
8017       indicatePessimisticFixpoint();
8018   }
8019
8020   /// See AbstractAttribute::updateImpl(...).
8021   ChangeStatus updateImpl(Attributor &A) override {
8022     // TODO: Once we have call site specific value information we can provide
8023     //       call site specific liveness information and then it makes
8024     //       sense to specialize attributes for call site arguments instead of
8025     //       redirecting requests to the callee argument.
8026     Function *F = getAssociatedFunction();
8027     const IRPosition &FnPos = IRPosition::function(*F);
8028     auto &FnAA =
8029         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
8030     bool Changed = false;
8031     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
8032                           AccessKind Kind, MemoryLocationsKind MLK) {
8033       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
8034                                 getAccessKindFromInst(I));
8035       return true;
8036     };
8037     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
8038       return indicatePessimisticFixpoint();
8039     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
8040   }
8041
8042   /// See AbstractAttribute::trackStatistics()
8043   void trackStatistics() const override {
8044     if (isAssumedReadNone())
8045       STATS_DECLTRACK_CS_ATTR(readnone)
8046   }
8047 };
8048 } // namespace
8049
8050 /// ------------------ Value Constant Range Attribute -------------------------
8051
8052 namespace {
8053 struct AAValueConstantRangeImpl : AAValueConstantRange {
8054   using StateType = IntegerRangeState;
8055   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8056       : AAValueConstantRange(IRP, A) {}
8057
8058   /// See AbstractAttribute::initialize(..).
8059   void initialize(Attributor &A) override {
8060     if (A.hasSimplificationCallback(getIRPosition())) {
8061       indicatePessimisticFixpoint();
8062       return;
8063     }
8064
8065     // Intersect a range given by SCEV.
8066     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8067
8068     // Intersect a range given by LVI.
8069     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8070   }
8071
8072   /// See AbstractAttribute::getAsStr().
8073   const std::string getAsStr() const override {
8074     std::string Str;
8075     llvm::raw_string_ostream OS(Str);
8076     OS << "range(" << getBitWidth() << ")<";
8077     getKnown().print(OS);
8078     OS << " / ";
8079     getAssumed().print(OS);
8080     OS << ">";
8081     return OS.str();
8082   }
8083
8084   /// Helper function to get a SCEV expr for the associated value at program
8085   /// point \p I.
8086   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8087     if (!getAnchorScope())
8088       return nullptr;
8089
8090     ScalarEvolution *SE =
8091         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8092             *getAnchorScope());
8093
8094     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8095         *getAnchorScope());
8096
8097     if (!SE || !LI)
8098       return nullptr;
8099
8100     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8101     if (!I)
8102       return S;
8103
8104     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8105   }
8106
8107   /// Helper function to get a range from SCEV for the associated value at
8108   /// program point \p I.
8109   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8110                                          const Instruction *I = nullptr) const {
8111     if (!getAnchorScope())
8112       return getWorstState(getBitWidth());
8113
8114     ScalarEvolution *SE =
8115         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8116             *getAnchorScope());
8117
8118     const SCEV *S = getSCEV(A, I);
8119     if (!SE || !S)
8120       return getWorstState(getBitWidth());
8121
8122     return SE->getUnsignedRange(S);
8123   }
8124
8125   /// Helper function to get a range from LVI for the associated value at
8126   /// program point \p I.
8127   ConstantRange
8128   getConstantRangeFromLVI(Attributor &A,
8129                           const Instruction *CtxI = nullptr) const {
8130     if (!getAnchorScope())
8131       return getWorstState(getBitWidth());
8132
8133     LazyValueInfo *LVI =
8134         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8135             *getAnchorScope());
8136
8137     if (!LVI || !CtxI)
8138       return getWorstState(getBitWidth());
8139     return LVI->getConstantRange(&getAssociatedValue(),
8140                                  const_cast<Instruction *>(CtxI));
8141   }
8142
8143   /// Return true if \p CtxI is valid for querying outside analyses.
8144   /// This basically makes sure we do not ask intra-procedural analyses
8145   /// about a context in the wrong function or a context that violates
8146   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8147   /// if the original context of this AA is OK or should be considered invalid.
8148   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8149                                                const Instruction *CtxI,
8150                                                bool AllowAACtxI) const {
8151     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8152       return false;
8153
8154     // Our context might be in a different function; no intra-procedural
8155     // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
8156     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8157       return false;
8158
8159     // If the context is not dominated by the value, there are paths to the
8160     // context that do not define the value.
This cannot be handled by
8161     // LazyValueInfo so we need to bail.
8162     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8163       InformationCache &InfoCache = A.getInfoCache();
8164       const DominatorTree *DT =
8165           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8166               *I->getFunction());
8167       return DT && DT->dominates(I, CtxI);
8168     }
8169
8170     return true;
8171   }
8172
8173   /// See AAValueConstantRange::getKnownConstantRange(..).
8174   ConstantRange
8175   getKnownConstantRange(Attributor &A,
8176                         const Instruction *CtxI = nullptr) const override {
8177     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8178                                                  /* AllowAACtxI */ false))
8179       return getKnown();
8180
8181     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8182     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8183     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8184   }
8185
8186   /// See AAValueConstantRange::getAssumedConstantRange(..).
8187   ConstantRange
8188   getAssumedConstantRange(Attributor &A,
8189                           const Instruction *CtxI = nullptr) const override {
8190     // TODO: Make SCEV use Attributor assumptions.
8191     //       We may be able to bound a variable range via assumptions in
8192     //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known to
8193     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8194     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8195                                                  /* AllowAACtxI */ false))
8196       return getAssumed();
8197
8198     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8199     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8200     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8201   }
8202
8203   /// Helper function to create MDNode for range metadata.
8204   static MDNode *
8205   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8206                             const ConstantRange &AssumedConstantRange) {
8207     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8208                                   Ty, AssumedConstantRange.getLower())),
8209                               ConstantAsMetadata::get(ConstantInt::get(
8210                                   Ty, AssumedConstantRange.getUpper()))};
8211     return MDNode::get(Ctx, LowAndHigh);
8212   }
8213
8214   /// Return true if annotating \p Assumed would improve on \p KnownRanges.
8215   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8216
8217     if (Assumed.isFullSet())
8218       return false;
8219
8220     if (!KnownRanges)
8221       return true;
8222
8223     // If multiple ranges are annotated in the IR, we give up on annotating the
8224     // assumed range for now.
8225
8226     // TODO: If there exists a known range which contains the assumed range, we
8227     //       can say the assumed range is better.
8228     if (KnownRanges->getNumOperands() > 2)
8229       return false;
8230
8231     ConstantInt *Lower =
8232         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8233     ConstantInt *Upper =
8234         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8235
8236     ConstantRange Known(Lower->getValue(), Upper->getValue());
8237     return Known.contains(Assumed) && Known != Assumed;
8238   }
8239
8240   /// Helper function to set range metadata.
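  /// A usage sketch, mirroring the call in manifest() below:
  ///
  ///   if (Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()))
  ///     if (setRangeMetadataIfisBetterRange(I, getAssumedConstantRange(A)))
  ///       Changed = ChangeStatus::CHANGED;
  ///
  /// That is, !range metadata is only written when it strictly improves on
  /// whatever is already annotated.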
8241   static bool
8242   setRangeMetadataIfisBetterRange(Instruction *I,
8243                                   const ConstantRange &AssumedConstantRange) {
8244     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8245     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8246       if (!AssumedConstantRange.isEmptySet()) {
8247         I->setMetadata(LLVMContext::MD_range,
8248                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8249                                                  AssumedConstantRange));
8250         return true;
8251       }
8252     }
8253     return false;
8254   }
8255
8256   /// See AbstractAttribute::manifest()
8257   ChangeStatus manifest(Attributor &A) override {
8258     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8259     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8260     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8261
8262     auto &V = getAssociatedValue();
8263     if (!AssumedConstantRange.isEmptySet() &&
8264         !AssumedConstantRange.isSingleElement()) {
8265       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8266         assert(I == getCtxI() && "Should not annotate an instruction which is "
8267                                  "not the context instruction");
8268         if (isa<CallInst>(I) || isa<LoadInst>(I))
8269           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8270             Changed = ChangeStatus::CHANGED;
8271       }
8272     }
8273
8274     return Changed;
8275   }
8276 };
8277
8278 struct AAValueConstantRangeArgument final
8279     : AAArgumentFromCallSiteArguments<
8280           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8281           true /* BridgeCallBaseContext */> {
8282   using Base = AAArgumentFromCallSiteArguments<
8283       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8284       true /* BridgeCallBaseContext */>;
8285   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8286       : Base(IRP, A) {}
8287
8288   /// See AbstractAttribute::initialize(..).
8289   void initialize(Attributor &A) override {
8290     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8291       indicatePessimisticFixpoint();
8292     } else {
8293       Base::initialize(A);
8294     }
8295   }
8296
8297   /// See AbstractAttribute::trackStatistics()
8298   void trackStatistics() const override {
8299     STATS_DECLTRACK_ARG_ATTR(value_range)
8300   }
8301 };
8302
8303 struct AAValueConstantRangeReturned
8304     : AAReturnedFromReturnedValues<AAValueConstantRange,
8305                                    AAValueConstantRangeImpl,
8306                                    AAValueConstantRangeImpl::StateType,
8307                                    /* PropagateCallBaseContext */ true> {
8308   using Base =
8309       AAReturnedFromReturnedValues<AAValueConstantRange,
8310                                    AAValueConstantRangeImpl,
8311                                    AAValueConstantRangeImpl::StateType,
8312                                    /* PropagateCallBaseContext */ true>;
8313   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8314       : Base(IRP, A) {}
8315
8316   /// See AbstractAttribute::initialize(...).
8317   void initialize(Attributor &A) override {}
8318
8319   /// See AbstractAttribute::trackStatistics()
8320   void trackStatistics() const override {
8321     STATS_DECLTRACK_FNRET_ATTR(value_range)
8322   }
8323 };
8324
8325 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8326   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8327       : AAValueConstantRangeImpl(IRP, A) {}
8328
8329   /// See AbstractAttribute::initialize(...).
8330   void initialize(Attributor &A) override {
8331     AAValueConstantRangeImpl::initialize(A);
8332     if (isAtFixpoint())
8333       return;
8334
8335     Value &V = getAssociatedValue();
8336
8337     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8338       unionAssumed(ConstantRange(C->getValue()));
8339       indicateOptimisticFixpoint();
8340       return;
8341     }
8342
8343     if (isa<UndefValue>(&V)) {
8344       // Collapse the undef state to 0.
8345       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8346       indicateOptimisticFixpoint();
8347       return;
8348     }
8349
8350     if (isa<CallBase>(&V))
8351       return;
8352
8353     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8354       return;
8355
8356     // If it is a load instruction with range metadata, use it.
8357     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8358       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8359         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8360         return;
8361       }
8362
8363     // We can work with PHI and select instructions as we traverse their
8364     // operands during the update.
8365     if (isa<SelectInst>(V) || isa<PHINode>(V))
8366       return;
8367
8368     // Otherwise we give up.
8369     indicatePessimisticFixpoint();
8370
8371     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8372                       << getAssociatedValue() << "\n");
8373   }
8374
8375   bool calculateBinaryOperator(
8376       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8377       const Instruction *CtxI,
8378       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8379     Value *LHS = BinOp->getOperand(0);
8380     Value *RHS = BinOp->getOperand(1);
8381
8382     // Simplify the operands first.
8383     bool UsedAssumedInformation = false;
8384     const auto &SimplifiedLHS =
8385         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8386                                *this, UsedAssumedInformation);
8387     if (!SimplifiedLHS.hasValue())
8388       return true;
8389     if (!SimplifiedLHS.getValue())
8390       return false;
8391     LHS = *SimplifiedLHS;
8392
8393     const auto &SimplifiedRHS =
8394         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8395                                *this, UsedAssumedInformation);
8396     if (!SimplifiedRHS.hasValue())
8397       return true;
8398     if (!SimplifiedRHS.getValue())
8399       return false;
8400     RHS = *SimplifiedRHS;
8401
8402     // TODO: Allow non-integers as well.
8403     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8404       return false;
8405
8406     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8407         *this, IRPosition::value(*LHS, getCallBaseContext()),
8408         DepClassTy::REQUIRED);
8409     QuerriedAAs.push_back(&LHSAA);
8410     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8411
8412     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8413         *this, IRPosition::value(*RHS, getCallBaseContext()),
8414         DepClassTy::REQUIRED);
8415     QuerriedAAs.push_back(&RHSAA);
8416     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8417
8418     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8419
8420     T.unionAssumed(AssumedRange);
8421
8422     // TODO: Track a known state too.
8423
8424     return T.isValidState();
8425   }
8426
8427   bool calculateCastInst(
8428       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8429       const Instruction *CtxI,
8430       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8431     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8432     // TODO: Allow non-integers as well.
8433     Value *OpV = CastI->getOperand(0);
8434
8435     // Simplify the operand first.
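    // (A note on the Optional protocol used here, to the best of our
    //  reading: a None result from getAssumedSimplified means no simplified
    //  value is known *yet*, so we optimistically report success and wait,
    //  while a present-but-null result means the value cannot be used, hence
    //  the failure path. The two early exits below mirror that distinction.)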
8436     bool UsedAssumedInformation = false;
8437     const auto &SimplifiedOpV =
8438         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8439                                *this, UsedAssumedInformation);
8440     if (!SimplifiedOpV.hasValue())
8441       return true;
8442     if (!SimplifiedOpV.getValue())
8443       return false;
8444     OpV = *SimplifiedOpV;
8445
8446     if (!OpV->getType()->isIntegerTy())
8447       return false;
8448
8449     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8450         *this, IRPosition::value(*OpV, getCallBaseContext()),
8451         DepClassTy::REQUIRED);
8452     QuerriedAAs.push_back(&OpAA);
8453     T.unionAssumed(
8454         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8455     return T.isValidState();
8456   }
8457
8458   bool
8459   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8460                    const Instruction *CtxI,
8461                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8462     Value *LHS = CmpI->getOperand(0);
8463     Value *RHS = CmpI->getOperand(1);
8464
8465     // Simplify the operands first.
8466     bool UsedAssumedInformation = false;
8467     const auto &SimplifiedLHS =
8468         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8469                                *this, UsedAssumedInformation);
8470     if (!SimplifiedLHS.hasValue())
8471       return true;
8472     if (!SimplifiedLHS.getValue())
8473       return false;
8474     LHS = *SimplifiedLHS;
8475
8476     const auto &SimplifiedRHS =
8477         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8478                                *this, UsedAssumedInformation);
8479     if (!SimplifiedRHS.hasValue())
8480       return true;
8481     if (!SimplifiedRHS.getValue())
8482       return false;
8483     RHS = *SimplifiedRHS;
8484
8485     // TODO: Allow non-integers as well.
8486     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8487       return false;
8488
8489     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8490         *this, IRPosition::value(*LHS, getCallBaseContext()),
8491         DepClassTy::REQUIRED);
8492     QuerriedAAs.push_back(&LHSAA);
8493     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8494         *this, IRPosition::value(*RHS, getCallBaseContext()),
8495         DepClassTy::REQUIRED);
8496     QuerriedAAs.push_back(&RHSAA);
8497     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8498     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8499
8500     // If one of them is an empty set, we can't decide.
8501     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8502       return true;
8503
8504     bool MustTrue = false, MustFalse = false;
8505
8506     auto AllowedRegion =
8507         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8508
8509     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8510       MustFalse = true;
8511
8512     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8513       MustTrue = true;
8514
8515     assert((!MustTrue || !MustFalse) &&
8516            "Either MustTrue or MustFalse should be false!");
8517
8518     if (MustTrue)
8519       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8520     else if (MustFalse)
8521       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8522     else
8523       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8524
8525     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8526                       << " " << RHSAA << "\n");
8527
8528     // TODO: Track a known state too.
8529     return T.isValidState();
8530   }
8531
8532   /// See AbstractAttribute::updateImpl(...).
8533   ChangeStatus updateImpl(Attributor &A) override {
8534     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8535                             IntegerRangeState &T, bool Stripped) -> bool {
8536       Instruction *I = dyn_cast<Instruction>(&V);
8537       if (!I || isa<CallBase>(I)) {
8538
8539         // Simplify the operand first.
8540         bool UsedAssumedInformation = false;
8541         const auto &SimplifiedOpV =
8542             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8543                                    *this, UsedAssumedInformation);
8544         if (!SimplifiedOpV.hasValue())
8545           return true;
8546         if (!SimplifiedOpV.getValue())
8547           return false;
8548         Value *VPtr = *SimplifiedOpV;
8549
8550         // If the value is not an instruction, query its AA via the Attributor.
8551         const auto &AA = A.getAAFor<AAValueConstantRange>(
8552             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8553             DepClassTy::REQUIRED);
8554
8555         // We do not clamp here so that the program point CtxI can be utilized.
8556         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8557
8558         return T.isValidState();
8559       }
8560
8561       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8562       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8563         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8564           return false;
8565       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8566         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8567           return false;
8568       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8569         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8570           return false;
8571       } else {
8572         // Give up with other instructions.
8573         // TODO: Add other instructions
8574
8575         T.indicatePessimisticFixpoint();
8576         return false;
8577       }
8578
8579       // Catch circular reasoning in a pessimistic way for now.
8580       // TODO: Check how the range evolves and if we stripped anything, see also
8581       //       AADereferenceable or AAAlign for similar situations.
8582       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8583         if (QueriedAA != this)
8584           continue;
8585         // If we are in a steady state we do not need to worry.
8586         if (T.getAssumed() == getState().getAssumed())
8587           continue;
8588         T.indicatePessimisticFixpoint();
8589       }
8590
8591       return T.isValidState();
8592     };
8593
8594     IntegerRangeState T(getBitWidth());
8595
8596     bool UsedAssumedInformation = false;
8597     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8598                                                   VisitValueCB, getCtxI(),
8599                                                   UsedAssumedInformation,
8600                                                   /* UseValueSimplify */ false))
8601       return indicatePessimisticFixpoint();
8602
8603     // Ensure that long def-use chains can't cause circular reasoning either by
8604     // introducing a cutoff below.
8605     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8606       return ChangeStatus::UNCHANGED;
8607     if (++NumChanges > MaxNumChanges) {
8608       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8609                         << " changes but only " << MaxNumChanges
8610                         << " are allowed to avoid cyclic reasoning.");
8611       return indicatePessimisticFixpoint();
8612     }
8613     return ChangeStatus::CHANGED;
8614   }
8615
8616   /// See AbstractAttribute::trackStatistics()
8617   void trackStatistics() const override {
8618     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8619   }
8620
8621   /// Tracker to bail after too many widening steps of the constant range.
8622   int NumChanges = 0;
8623
8624   /// Upper bound for the number of allowed changes (=widening steps) for the
8625   /// constant range before we give up.
8626   static constexpr int MaxNumChanges = 5;
8627 };
8628
8629 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8630   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8631       : AAValueConstantRangeImpl(IRP, A) {}
8632
8633   /// See AbstractAttribute::updateImpl(...).
8634   ChangeStatus updateImpl(Attributor &A) override {
8635     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8636                      "not be called");
8637   }
8638
8639   /// See AbstractAttribute::trackStatistics()
8640   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8641 };
8642
8643 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8644   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8645       : AAValueConstantRangeFunction(IRP, A) {}
8646
8647   /// See AbstractAttribute::trackStatistics()
8648   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8649 };
8650
8651 struct AAValueConstantRangeCallSiteReturned
8652     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8653                                      AAValueConstantRangeImpl,
8654                                      AAValueConstantRangeImpl::StateType,
8655                                      /* IntroduceCallBaseContext */ true> {
8656   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8657       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8658                                        AAValueConstantRangeImpl,
8659                                        AAValueConstantRangeImpl::StateType,
8660                                        /* IntroduceCallBaseContext */ true>(IRP,
8661                                                                             A) {
8662   }
8663
8664   /// See AbstractAttribute::initialize(...).
8665   void initialize(Attributor &A) override {
8666     // If it is a call instruction with range metadata, use the metadata.
8667     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8668       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8669         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8670
8671     AAValueConstantRangeImpl::initialize(A);
8672   }
8673
8674   /// See AbstractAttribute::trackStatistics()
8675   void trackStatistics() const override {
8676     STATS_DECLTRACK_CSRET_ATTR(value_range)
8677   }
8678 };
8679 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8680   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8681       : AAValueConstantRangeFloating(IRP, A) {}
8682
8683   /// See AbstractAttribute::manifest()
8684   ChangeStatus manifest(Attributor &A) override {
8685     return ChangeStatus::UNCHANGED;
8686   }
8687
8688   /// See AbstractAttribute::trackStatistics()
8689   void trackStatistics() const override {
8690     STATS_DECLTRACK_CSARG_ATTR(value_range)
8691   }
8692 };
8693 } // namespace
8694
8695 /// ------------------ Potential Values Attribute -------------------------
8696
8697 namespace {
8698 struct AAPotentialValuesImpl : AAPotentialValues {
8699   using StateType = PotentialConstantIntValuesState;
8700
8701   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8702       : AAPotentialValues(IRP, A) {}
8703
8704   /// See AbstractAttribute::initialize(..).
8705   void initialize(Attributor &A) override {
8706     if (A.hasSimplificationCallback(getIRPosition()))
8707       indicatePessimisticFixpoint();
8708     else
8709       AAPotentialValues::initialize(A);
8710   }
8711
8712   /// See AbstractAttribute::getAsStr().
8713   const std::string getAsStr() const override {
8714     std::string Str;
8715     llvm::raw_string_ostream OS(Str);
8716     OS << getState();
8717     return OS.str();
8718   }
8719
8720   /// See AbstractAttribute::updateImpl(...).
8721   ChangeStatus updateImpl(Attributor &A) override {
8722     return indicatePessimisticFixpoint();
8723   }
8724 };
8725
8726 struct AAPotentialValuesArgument final
8727     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8728                                       PotentialConstantIntValuesState> {
8729   using Base =
8730       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8731                                       PotentialConstantIntValuesState>;
8732   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8733       : Base(IRP, A) {}
8734
8735   /// See AbstractAttribute::initialize(..).
8736   void initialize(Attributor &A) override {
8737     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8738       indicatePessimisticFixpoint();
8739     } else {
8740       Base::initialize(A);
8741     }
8742   }
8743
8744   /// See AbstractAttribute::trackStatistics()
8745   void trackStatistics() const override {
8746     STATS_DECLTRACK_ARG_ATTR(potential_values)
8747   }
8748 };
8749
8750 struct AAPotentialValuesReturned
8751     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8752   using Base =
8753       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8754   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8755       : Base(IRP, A) {}
8756
8757   /// See AbstractAttribute::trackStatistics()
8758   void trackStatistics() const override {
8759     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8760   }
8761 };
8762
8763 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8764   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8765       : AAPotentialValuesImpl(IRP, A) {}
8766
8767   /// See AbstractAttribute::initialize(..).
8768   void initialize(Attributor &A) override {
8769     AAPotentialValuesImpl::initialize(A);
8770     if (isAtFixpoint())
8771       return;
8772
8773     Value &V = getAssociatedValue();
8774
8775     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8776       unionAssumed(C->getValue());
8777       indicateOptimisticFixpoint();
8778       return;
8779     }
8780
8781     if (isa<UndefValue>(&V)) {
8782       unionAssumedWithUndef();
8783       indicateOptimisticFixpoint();
8784       return;
8785     }
8786
8787     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8788       return;
8789
8790     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8791       return;
8792
8793     indicatePessimisticFixpoint();
8794
8795     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8796                       << getAssociatedValue() << "\n");
8797   }
8798
8799   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8800                                 const APInt &RHS) {
8801     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8802   }
8803
8804   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8805                                  uint32_t ResultBitWidth) {
8806     Instruction::CastOps CastOp = CI->getOpcode();
8807     switch (CastOp) {
8808     default:
8809       llvm_unreachable("unsupported or not integer cast");
8810     case Instruction::Trunc:
8811       return Src.trunc(ResultBitWidth);
8812     case Instruction::SExt:
8813       return Src.sext(ResultBitWidth);
8814     case Instruction::ZExt:
8815       return Src.zext(ResultBitWidth);
8816     case Instruction::BitCast:
8817       return Src;
8818     }
8819   }
8820
8821   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8822                                        const APInt &LHS, const APInt &RHS,
8823                                        bool &SkipOperation, bool &Unsupported) {
8824     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8825     // Unsupported is set to true when the binary operator is not supported.
8826     // SkipOperation is set to true when UB occurs with the given operand pair
8827     // (LHS, RHS).
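    // For example (operand values hypothetical): evaluating "udiv" with an
    // RHS of 0 sets SkipOperation, so the caller drops just that (LHS, RHS)
    // pair instead of invalidating the whole potential-values state.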
8828     // TODO: we should look at nsw and nuw keywords to handle operations
8829     //       that create poison or undef values.
8830     switch (BinOpcode) {
8831     default:
8832       Unsupported = true;
8833       return LHS;
8834     case Instruction::Add:
8835       return LHS + RHS;
8836     case Instruction::Sub:
8837       return LHS - RHS;
8838     case Instruction::Mul:
8839       return LHS * RHS;
8840     case Instruction::UDiv:
8841       if (RHS.isZero()) {
8842         SkipOperation = true;
8843         return LHS;
8844       }
8845       return LHS.udiv(RHS);
8846     case Instruction::SDiv:
8847       if (RHS.isZero()) {
8848         SkipOperation = true;
8849         return LHS;
8850       }
8851       return LHS.sdiv(RHS);
8852     case Instruction::URem:
8853       if (RHS.isZero()) {
8854         SkipOperation = true;
8855         return LHS;
8856       }
8857       return LHS.urem(RHS);
8858     case Instruction::SRem:
8859       if (RHS.isZero()) {
8860         SkipOperation = true;
8861         return LHS;
8862       }
8863       return LHS.srem(RHS);
8864     case Instruction::Shl:
8865       return LHS.shl(RHS);
8866     case Instruction::LShr:
8867       return LHS.lshr(RHS);
8868     case Instruction::AShr:
8869       return LHS.ashr(RHS);
8870     case Instruction::And:
8871       return LHS & RHS;
8872     case Instruction::Or:
8873       return LHS | RHS;
8874     case Instruction::Xor:
8875       return LHS ^ RHS;
8876     }
8877   }
8878
8879   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8880                                            const APInt &LHS, const APInt &RHS) {
8881     bool SkipOperation = false;
8882     bool Unsupported = false;
8883     APInt Result =
8884         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8885     if (Unsupported)
8886       return false;
8887     // If SkipOperation is true, we can ignore this operand pair (L, R).
8888     if (!SkipOperation)
8889       unionAssumed(Result);
8890     return isValidState();
8891   }
8892
8893   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8894     auto AssumedBefore = getAssumed();
8895     Value *LHS = ICI->getOperand(0);
8896     Value *RHS = ICI->getOperand(1);
8897
8898     // Simplify the operands first.
8899     bool UsedAssumedInformation = false;
8900     const auto &SimplifiedLHS =
8901         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8902                                *this, UsedAssumedInformation);
8903     if (!SimplifiedLHS.hasValue())
8904       return ChangeStatus::UNCHANGED;
8905     if (!SimplifiedLHS.getValue())
8906       return indicatePessimisticFixpoint();
8907     LHS = *SimplifiedLHS;
8908
8909     const auto &SimplifiedRHS =
8910         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8911                                *this, UsedAssumedInformation);
8912     if (!SimplifiedRHS.hasValue())
8913       return ChangeStatus::UNCHANGED;
8914     if (!SimplifiedRHS.getValue())
8915       return indicatePessimisticFixpoint();
8916     RHS = *SimplifiedRHS;
8917
8918     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8919       return indicatePessimisticFixpoint();
8920
8921     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8922                                                 DepClassTy::REQUIRED);
8923     if (!LHSAA.isValidState())
8924       return indicatePessimisticFixpoint();
8925
8926     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8927                                                 DepClassTy::REQUIRED);
8928     if (!RHSAA.isValidState())
8929       return indicatePessimisticFixpoint();
8930
8931     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8932     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8933
8934     // TODO: make use of undef flag to limit potential values aggressively.
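    // Worked example (sets hypothetical): for "icmp eq" with assumed sets
    // LHS == {0, 1} and RHS == {1}, the pairwise evaluation below sees both
    // a true and a false outcome and gives up via the pessimistic fixpoint;
    // with LHS == {0} the only outcome is false and just "i1 0" is recorded.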
8935 bool MaybeTrue = false, MaybeFalse = false; 8936 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8937 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 8938 // The result of any comparison between undefs can be soundly replaced 8939 // with undef. 8940 unionAssumedWithUndef(); 8941 } else if (LHSAA.undefIsContained()) { 8942 for (const APInt &R : RHSAAPVS) { 8943 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8944 MaybeTrue |= CmpResult; 8945 MaybeFalse |= !CmpResult; 8946 if (MaybeTrue & MaybeFalse) 8947 return indicatePessimisticFixpoint(); 8948 } 8949 } else if (RHSAA.undefIsContained()) { 8950 for (const APInt &L : LHSAAPVS) { 8951 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8952 MaybeTrue |= CmpResult; 8953 MaybeFalse |= !CmpResult; 8954 if (MaybeTrue & MaybeFalse) 8955 return indicatePessimisticFixpoint(); 8956 } 8957 } else { 8958 for (const APInt &L : LHSAAPVS) { 8959 for (const APInt &R : RHSAAPVS) { 8960 bool CmpResult = calculateICmpInst(ICI, L, R); 8961 MaybeTrue |= CmpResult; 8962 MaybeFalse |= !CmpResult; 8963 if (MaybeTrue & MaybeFalse) 8964 return indicatePessimisticFixpoint(); 8965 } 8966 } 8967 } 8968 if (MaybeTrue) 8969 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8970 if (MaybeFalse) 8971 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8972 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8973 : ChangeStatus::CHANGED; 8974 } 8975 8976 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8977 auto AssumedBefore = getAssumed(); 8978 Value *LHS = SI->getTrueValue(); 8979 Value *RHS = SI->getFalseValue(); 8980 8981 // Simplify the operands first. 8982 bool UsedAssumedInformation = false; 8983 const auto &SimplifiedLHS = 8984 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8985 *this, UsedAssumedInformation); 8986 if (!SimplifiedLHS.hasValue()) 8987 return ChangeStatus::UNCHANGED; 8988 if (!SimplifiedLHS.getValue()) 8989 return indicatePessimisticFixpoint(); 8990 LHS = *SimplifiedLHS; 8991 8992 const auto &SimplifiedRHS = 8993 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8994 *this, UsedAssumedInformation); 8995 if (!SimplifiedRHS.hasValue()) 8996 return ChangeStatus::UNCHANGED; 8997 if (!SimplifiedRHS.getValue()) 8998 return indicatePessimisticFixpoint(); 8999 RHS = *SimplifiedRHS; 9000 9001 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9002 return indicatePessimisticFixpoint(); 9003 9004 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 9005 UsedAssumedInformation); 9006 9007 // Check if we only need one operand. 9008 bool OnlyLeft = false, OnlyRight = false; 9009 if (C.hasValue() && *C && (*C)->isOneValue()) 9010 OnlyLeft = true; 9011 else if (C.hasValue() && *C && (*C)->isZeroValue()) 9012 OnlyRight = true; 9013 9014 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 9015 if (!OnlyRight) { 9016 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9017 DepClassTy::REQUIRED); 9018 if (!LHSAA->isValidState()) 9019 return indicatePessimisticFixpoint(); 9020 } 9021 if (!OnlyLeft) { 9022 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9023 DepClassTy::REQUIRED); 9024 if (!RHSAA->isValidState()) 9025 return indicatePessimisticFixpoint(); 9026 } 9027 9028 if (!LHSAA || !RHSAA) { 9029 // select (true/false), lhs, rhs 9030 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 9031 9032 if (OpAA->undefIsContained()) 9033 unionAssumedWithUndef(); 9034 else 9035 unionAssumed(*OpAA); 9036 9037 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 9038 // select i1 *, undef , undef => undef 9039 unionAssumedWithUndef(); 9040 } else { 9041 unionAssumed(*LHSAA); 9042 unionAssumed(*RHSAA); 9043 } 9044 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9045 : ChangeStatus::CHANGED; 9046 } 9047 9048 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 9049 auto AssumedBefore = getAssumed(); 9050 if (!CI->isIntegerCast()) 9051 return indicatePessimisticFixpoint(); 9052 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 9053 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 9054 Value *Src = CI->getOperand(0); 9055 9056 // Simplify the operand first. 9057 bool UsedAssumedInformation = false; 9058 const auto &SimplifiedSrc = 9059 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 9060 *this, UsedAssumedInformation); 9061 if (!SimplifiedSrc.hasValue()) 9062 return ChangeStatus::UNCHANGED; 9063 if (!SimplifiedSrc.getValue()) 9064 return indicatePessimisticFixpoint(); 9065 Src = *SimplifiedSrc; 9066 9067 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 9068 DepClassTy::REQUIRED); 9069 if (!SrcAA.isValidState()) 9070 return indicatePessimisticFixpoint(); 9071 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 9072 if (SrcAA.undefIsContained()) 9073 unionAssumedWithUndef(); 9074 else { 9075 for (const APInt &S : SrcAAPVS) { 9076 APInt T = calculateCastInst(CI, S, ResultBitWidth); 9077 unionAssumed(T); 9078 } 9079 } 9080 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9081 : ChangeStatus::CHANGED; 9082 } 9083 9084 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 9085 auto AssumedBefore = getAssumed(); 9086 Value *LHS = BinOp->getOperand(0); 9087 Value *RHS = BinOp->getOperand(1); 9088 9089 // Simplify the operands first. 9090 bool UsedAssumedInformation = false; 9091 const auto &SimplifiedLHS = 9092 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 9093 *this, UsedAssumedInformation); 9094 if (!SimplifiedLHS.hasValue()) 9095 return ChangeStatus::UNCHANGED; 9096 if (!SimplifiedLHS.getValue()) 9097 return indicatePessimisticFixpoint(); 9098 LHS = *SimplifiedLHS; 9099 9100 const auto &SimplifiedRHS = 9101 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 9102 *this, UsedAssumedInformation); 9103 if (!SimplifiedRHS.hasValue()) 9104 return ChangeStatus::UNCHANGED; 9105 if (!SimplifiedRHS.getValue()) 9106 return indicatePessimisticFixpoint(); 9107 RHS = *SimplifiedRHS; 9108 9109 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9110 return indicatePessimisticFixpoint(); 9111 9112 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9113 DepClassTy::REQUIRED); 9114 if (!LHSAA.isValidState()) 9115 return indicatePessimisticFixpoint(); 9116 9117 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9118 DepClassTy::REQUIRED); 9119 if (!RHSAA.isValidState()) 9120 return indicatePessimisticFixpoint(); 9121 9122 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9123 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9124 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9125 9126 // TODO: make use of undef flag to limit potential values aggressively. 
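    // Worked example (sets hypothetical): for "add" with assumed sets
    // LHS == {2, 3} and RHS == {4}, the pairwise loop below yields the
    // union {6, 7}; undef operands take part as 0, via the Zero constant
    // defined above.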
9127 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9128 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9129 return indicatePessimisticFixpoint(); 9130 } else if (LHSAA.undefIsContained()) { 9131 for (const APInt &R : RHSAAPVS) { 9132 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9133 return indicatePessimisticFixpoint(); 9134 } 9135 } else if (RHSAA.undefIsContained()) { 9136 for (const APInt &L : LHSAAPVS) { 9137 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9138 return indicatePessimisticFixpoint(); 9139 } 9140 } else { 9141 for (const APInt &L : LHSAAPVS) { 9142 for (const APInt &R : RHSAAPVS) { 9143 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9144 return indicatePessimisticFixpoint(); 9145 } 9146 } 9147 } 9148 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9149 : ChangeStatus::CHANGED; 9150 } 9151 9152 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9153 auto AssumedBefore = getAssumed(); 9154 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9155 Value *IncomingValue = PHI->getIncomingValue(u); 9156 9157 // Simplify the operand first. 9158 bool UsedAssumedInformation = false; 9159 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9160 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9161 UsedAssumedInformation); 9162 if (!SimplifiedIncomingValue.hasValue()) 9163 continue; 9164 if (!SimplifiedIncomingValue.getValue()) 9165 return indicatePessimisticFixpoint(); 9166 IncomingValue = *SimplifiedIncomingValue; 9167 9168 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9169 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9170 if (!PotentialValuesAA.isValidState()) 9171 return indicatePessimisticFixpoint(); 9172 if (PotentialValuesAA.undefIsContained()) 9173 unionAssumedWithUndef(); 9174 else 9175 unionAssumed(PotentialValuesAA.getAssumed()); 9176 } 9177 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9178 : ChangeStatus::CHANGED; 9179 } 9180 9181 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9182 if (!L.getType()->isIntegerTy()) 9183 return indicatePessimisticFixpoint(); 9184 9185 auto Union = [&](Value &V) { 9186 if (isa<UndefValue>(V)) { 9187 unionAssumedWithUndef(); 9188 return true; 9189 } 9190 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9191 unionAssumed(CI->getValue()); 9192 return true; 9193 } 9194 return false; 9195 }; 9196 auto AssumedBefore = getAssumed(); 9197 9198 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9199 return indicatePessimisticFixpoint(); 9200 9201 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9202 : ChangeStatus::CHANGED; 9203 } 9204 9205 /// See AbstractAttribute::updateImpl(...). 
9206   ChangeStatus updateImpl(Attributor &A) override {
9207     Value &V = getAssociatedValue();
9208     Instruction *I = dyn_cast<Instruction>(&V);
9209
9210     if (auto *ICI = dyn_cast<ICmpInst>(I))
9211       return updateWithICmpInst(A, ICI);
9212
9213     if (auto *SI = dyn_cast<SelectInst>(I))
9214       return updateWithSelectInst(A, SI);
9215
9216     if (auto *CI = dyn_cast<CastInst>(I))
9217       return updateWithCastInst(A, CI);
9218
9219     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9220       return updateWithBinaryOperator(A, BinOp);
9221
9222     if (auto *PHI = dyn_cast<PHINode>(I))
9223       return updateWithPHINode(A, PHI);
9224
9225     if (auto *L = dyn_cast<LoadInst>(I))
9226       return updateWithLoad(A, *L);
9227
9228     return indicatePessimisticFixpoint();
9229   }
9230
9231   /// See AbstractAttribute::trackStatistics()
9232   void trackStatistics() const override {
9233     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9234   }
9235 };
9236
9237 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9238   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9239       : AAPotentialValuesImpl(IRP, A) {}
9240
9241   /// See AbstractAttribute::updateImpl(...).
9242   ChangeStatus updateImpl(Attributor &A) override {
9243     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9244                      "not be called");
9245   }
9246
9247   /// See AbstractAttribute::trackStatistics()
9248   void trackStatistics() const override {
9249     STATS_DECLTRACK_FN_ATTR(potential_values)
9250   }
9251 };
9252
9253 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9254   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9255       : AAPotentialValuesFunction(IRP, A) {}
9256
9257   /// See AbstractAttribute::trackStatistics()
9258   void trackStatistics() const override {
9259     STATS_DECLTRACK_CS_ATTR(potential_values)
9260   }
9261 };
9262
9263 struct AAPotentialValuesCallSiteReturned
9264     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9265   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9266       : AACallSiteReturnedFromReturned<AAPotentialValues,
9267                                        AAPotentialValuesImpl>(IRP, A) {}
9268
9269   /// See AbstractAttribute::trackStatistics()
9270   void trackStatistics() const override {
9271     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9272   }
9273 };
9274
9275 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9276   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9277       : AAPotentialValuesFloating(IRP, A) {}
9278
9279   /// See AbstractAttribute::initialize(..).
9280   void initialize(Attributor &A) override {
9281     AAPotentialValuesImpl::initialize(A);
9282     if (isAtFixpoint())
9283       return;
9284
9285     Value &V = getAssociatedValue();
9286
9287     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9288       unionAssumed(C->getValue());
9289       indicateOptimisticFixpoint();
9290       return;
9291     }
9292
9293     if (isa<UndefValue>(&V)) {
9294       unionAssumedWithUndef();
9295       indicateOptimisticFixpoint();
9296       return;
9297     }
9298   }
9299
9300   /// See AbstractAttribute::updateImpl(...).
9301   ChangeStatus updateImpl(Attributor &A) override {
9302     Value &V = getAssociatedValue();
9303     auto AssumedBefore = getAssumed();
9304     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9305                                              DepClassTy::REQUIRED);
9306     const auto &S = AA.getAssumed();
9307     unionAssumed(S);
9308     return AssumedBefore == getAssumed() ?
ChangeStatus::UNCHANGED
9309                                          : ChangeStatus::CHANGED;
9310   }
9311
9312   /// See AbstractAttribute::trackStatistics()
9313   void trackStatistics() const override {
9314     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9315   }
9316 };
9317
9318 /// ------------------------ NoUndef Attribute ---------------------------------
9319 struct AANoUndefImpl : AANoUndef {
9320   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9321
9322   /// See AbstractAttribute::initialize(...).
9323   void initialize(Attributor &A) override {
9324     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9325       indicateOptimisticFixpoint();
9326       return;
9327     }
9328     Value &V = getAssociatedValue();
9329     if (isa<UndefValue>(V))
9330       indicatePessimisticFixpoint();
9331     else if (isa<FreezeInst>(V))
9332       indicateOptimisticFixpoint();
9333     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9334              isGuaranteedNotToBeUndefOrPoison(&V))
9335       indicateOptimisticFixpoint();
9336     else
9337       AANoUndef::initialize(A);
9338   }
9339
9340   /// See followUsesInMBEC
9341   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9342                        AANoUndef::StateType &State) {
9343     const Value *UseV = U->get();
9344     const DominatorTree *DT = nullptr;
9345     AssumptionCache *AC = nullptr;
9346     InformationCache &InfoCache = A.getInfoCache();
9347     if (Function *F = getAnchorScope()) {
9348       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9349       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9350     }
9351     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9352     bool TrackUse = false;
9353     // Track use for instructions which must produce undef or poison bits when
9354     // at least one operand contains such bits.
9355     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9356       TrackUse = true;
9357     return TrackUse;
9358   }
9359
9360   /// See AbstractAttribute::getAsStr().
9361   const std::string getAsStr() const override {
9362     return getAssumed() ? "noundef" : "may-undef-or-poison";
9363   }
9364
9365   ChangeStatus manifest(Attributor &A) override {
9366     // We don't manifest the noundef attribute for dead positions because the
9367     // associated values with dead positions would be replaced with undef
9368     // values.
9369     bool UsedAssumedInformation = false;
9370     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9371                         UsedAssumedInformation))
9372       return ChangeStatus::UNCHANGED;
9373     // A position for which no simplified value exists is considered to be
9374     // dead. We don't manifest noundef in such positions for the same reason
9375     // as above.
9376     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9377              .hasValue())
9378       return ChangeStatus::UNCHANGED;
9379     return AANoUndef::manifest(A);
9380   }
9381 };
9382
9383 struct AANoUndefFloating : public AANoUndefImpl {
9384   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9385       : AANoUndefImpl(IRP, A) {}
9386
9387   /// See AbstractAttribute::initialize(...).
9388   void initialize(Attributor &A) override {
9389     AANoUndefImpl::initialize(A);
9390     if (!getState().isAtFixpoint())
9391       if (Instruction *CtxI = getCtxI())
9392         followUsesInMBEC(*this, A, getState(), *CtxI);
9393   }
9394
9395   /// See AbstractAttribute::updateImpl(...).
9396 ChangeStatus updateImpl(Attributor &A) override { 9397 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 9398 AANoUndef::StateType &T, bool Stripped) -> bool { 9399 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), 9400 DepClassTy::REQUIRED); 9401 if (!Stripped && this == &AA) { 9402 T.indicatePessimisticFixpoint(); 9403 } else { 9404 const AANoUndef::StateType &S = 9405 static_cast<const AANoUndef::StateType &>(AA.getState()); 9406 T ^= S; 9407 } 9408 return T.isValidState(); 9409 }; 9410 9411 StateType T; 9412 bool UsedAssumedInformation = false; 9413 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 9414 VisitValueCB, getCtxI(), 9415 UsedAssumedInformation)) 9416 return indicatePessimisticFixpoint(); 9417 9418 return clampStateAndIndicateChange(getState(), T); 9419 } 9420 9421 /// See AbstractAttribute::trackStatistics() 9422 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9423 }; 9424 9425 struct AANoUndefReturned final 9426 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { 9427 AANoUndefReturned(const IRPosition &IRP, Attributor &A) 9428 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} 9429 9430 /// See AbstractAttribute::trackStatistics() 9431 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9432 }; 9433 9434 struct AANoUndefArgument final 9435 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { 9436 AANoUndefArgument(const IRPosition &IRP, Attributor &A) 9437 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} 9438 9439 /// See AbstractAttribute::trackStatistics() 9440 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } 9441 }; 9442 9443 struct AANoUndefCallSiteArgument final : AANoUndefFloating { 9444 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) 9445 : AANoUndefFloating(IRP, A) {} 9446 9447 /// See AbstractAttribute::trackStatistics() 9448 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } 9449 }; 9450 9451 struct AANoUndefCallSiteReturned final 9452 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> { 9453 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) 9454 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {} 9455 9456 /// See AbstractAttribute::trackStatistics() 9457 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } 9458 }; 9459 9460 struct AACallEdgesImpl : public AACallEdges { 9461 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} 9462 9463 virtual const SetVector<Function *> &getOptimisticEdges() const override { 9464 return CalledFunctions; 9465 } 9466 9467 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; } 9468 9469 virtual bool hasNonAsmUnknownCallee() const override { 9470 return HasUnknownCalleeNonAsm; 9471 } 9472 9473 const std::string getAsStr() const override { 9474 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + 9475 std::to_string(CalledFunctions.size()) + "]"; 9476 } 9477 9478 void trackStatistics() const override {} 9479 9480 protected: 9481 void addCalledFunction(Function *Fn, ChangeStatus &Change) { 9482 if (CalledFunctions.insert(Fn)) { 9483 Change = ChangeStatus::CHANGED; 9484 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() 9485 << "\n"); 9486 } 9487 } 9488 9489 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { 9490 if (!HasUnknownCallee) 9491 Change = 
    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: process the called operand directly.
    ProcessCalledOperand(CB->getCalledOperand());
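
    // For illustration only: callback "broker" functions, e.g. pthread_create,
    // eventually invoke a function-pointer argument. This relationship is
    // described by `!callback` metadata on the broker's declaration, and
    // getCallbackUses(...) below returns the uses of those callback operands
    // so that the callbacks become call edges as well.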
    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};

struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation,
                                           /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(const Function &Fn) {
      Reachable.insert(&Fn);
      Unreachable.erase(&Fn);
    }

    /// If there is no information about the function, None is returned.
    Optional<bool> isCachedReachable(const Function &Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(&Fn))
        return true;

      if (Unreachable.count(&Fn))
        return false;

      return llvm::None;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<const Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<const Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we assume
    /// that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

  struct QueryResolver : public QuerySet {
    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (const Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(*Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList,
                     const Function &Fn) {
      Optional<bool> Cached = isCachedReachable(Fn);
      if (Cached.hasValue())
        return Cached.getValue();

      // The query was not cached, thus it is new. We need to request an update
      // explicitly to make sure the information is properly run to a
      // fixpoint.
      A.registerForUpdate(AA);

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(&Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          const Function &Fn) const {
      // Handle the most trivial case first.
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(const_cast<Function *>(&Fn)))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // Functions that do not call back into the module can be ignored.
          if (Edge->hasFnAttribute(Attribute::NoCallback))
            continue;

          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now; set dependencies and leave.
      for (auto *Dep : Deps)
        A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);

      return false;
    }
  };
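
  // For illustration only (hypothetical call graph): resolving whether @a can
  // reach @c when @a calls @b and @b calls @c first seeds Unreachable = {@c}
  // (the recursion guard above), then follows the optimistic call edges
  // through the AAFunctionReachability of @b. Once @c is found it migrates to
  // the Reachable set and later queries are answered from the cache.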

  /// Get call edges that can be reached by this instruction.
  bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
                             const Instruction &Inst,
                             SmallVector<const AACallEdges *> &Result) const {
    // Determine the call-like instructions that we can reach from the
    // instruction.
    auto CheckCallBase = [&](Instruction &CBInst) {
      if (!Reachability.isAssumedReachable(A, Inst, CBInst))
        return true;

      auto &CB = cast<CallBase>(CBInst);
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

      Result.push_back(&AAEdges);
      return true;
    };

    bool UsedAssumedInformation = false;
    return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
                                             UsedAssumedInformation,
                                             /* CheckBBLivenessOnly */ true);
  }

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
                                                          {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB,
                const Function &Fn) const override {
    if (!isValidState())
      return true;

    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);

    return Result;
  }

  bool instructionCanReach(Attributor &A, const Instruction &Inst,
                           const Function &Fn,
                           bool UseBackwards) const override {
    if (!isValidState())
      return true;

    if (UseBackwards)
      return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);

    const auto &Reachability = A.getAAFor<AAReachability>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);

    SmallVector<const AACallEdges *> CallEdges;
    bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
    if (!AllKnown)
      InstQSet.CanReachUnknownCallee = true;

    return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    // Update the instruction queries.
    if (!InstQueries.empty()) {
      const AAReachability *Reachability = &A.getAAFor<AAReachability>(
          *this, IRPosition::function(*getAssociatedFunction()),
          DepClassTy::REQUIRED);

      // Check for local call bases first.
      for (auto &InstPair : InstQueries) {
        SmallVector<const AACallEdges *> CallEdges;
        bool AllKnown = getReachableCallEdges(A, *Reachability,
                                              *InstPair.first, CallEdges);
        // Update will return CHANGED if this affects any queries.
        if (!AllKnown)
          InstPair.second.CanReachUnknownCallee = true;
        Change |= InstPair.second.update(A, *this, CallEdges);
      }
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QueryResolver WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  MapVector<const CallBase *, QueryResolver> CBQueries;

  /// This is for instruction queries that scan "forward".
  MapVector<const Instruction *, QueryResolver> InstQueries;
};
} // namespace

/// ---------------------- Assumption Propagation ------------------------------
namespace {
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};

/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by the
/// first nodes that are either entries into the call graph, or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool UsedAssumedInformation = false;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites, then this is either
    // an entry into the call graph or an empty node. This node is known to
    // only contain its own assumptions and can be propagated to its
    // successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true,
                                UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};
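
// For illustration only (hypothetical assumption sets): if @callee is called
// from @a, whose assumed set is {"x","y"}, and from @b, whose assumed set is
// {"y","z"}, the assumed set of @callee narrows to the intersection {"y"}.
// Manifested assumptions are kept as a comma-separated string attribute (see
// llvm/IR/Assumptions.h for the exact spelling), e.g.,
//   attributes #0 = { "llvm.assume"="ompx_no_call_asm" }
// which is what the hasAssumption(...) queries elsewhere in this file test.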

/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};
} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
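//
// For illustration only, the invocation
//   CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// further below expands, roughly, to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     // Each position kind not valid for this attribute gets its own
//     // llvm_unreachable case via SWITCH_PK_INV, e.g.:
//     case IRPosition::IRP_FLOAT:
//       llvm_unreachable("Cannot create AANoUnwind for a floating position!");
//     ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }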

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV