//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro STATS_DECLTRACK
// with a custom message. If there are multiple increment sites, STATS_DECL and
// STATS_TRACK can also be used separately.
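// For example, a hypothetical attribute with two distinct increment sites
// (the attribute name and conditions below are illustrative only) could do:
//  void trackStatistics() const override {
//    STATS_DECL(nofancy, Function, "Number of functions marked 'nofancy'")
//    if (ManifestedOnEntry)
//      STATS_TRACK(nofancy, Function)
//    if (ManifestedOnExit)
//      STATS_TRACK(nofancy, Function)
//  }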
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
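// For intuition, a minimal sketch of what constructPointer produces, assuming
// a struct type %S = type { i32, i32, [4 x i32] } and a request for offset 12:
// DL.getGEPIndicesForOffset yields the indices {0, 2, 1}, so for %p of type
// %S* the call builds roughly
//   %p.0.2.1 = getelementptr %S, %S* %p, i32 0, i32 2, i32 1
// and only a remaining byte offset (none in this case) would be added through
// an additional i8* GEP.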
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA,
        IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          true, 32, StripCB))
    return false;
  return true;
}
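// For intuition, a sketch of what the traversal above computes (the IR names
// are illustrative): given
//   %sel = select i1 %c, i8* %a, i8* %b
// a query for the underlying objects of %sel will, unless the condition
// simplifies to a constant, follow both %a and %b, strip them through
// getUnderlyingObject via StripCB, and report each distinct object to the
// callback exactly once, collecting them in Objects.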
const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccessPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all return values we find.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
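// As a small worked example of the clamping above, assume StateType is an
// integer lattice like the one used for dereferenceability: if one returned
// value is assumed dereferenceable(16) and another only dereferenceable(8),
// joining with operator&= leaves T at dereferenceable(8). The returned
// position can only claim what holds for every possibly returned value.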
namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all call site arguments
  // we find.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  //   void f(int a, int b, int *ptr) {
  //     if (a)
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     else {
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     }
  //   }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
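// A minimal sketch of an implementer of the followUseInMBEC contract above
// (the struct and the predicate are purely illustrative, not a real AA):
//
//  struct AAExampleMBEC {
//    /// Called for each use executed in the context; return true to also
//    /// queue the uses of the user \p I for transitive exploration.
//    bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
//                         BooleanState &State) {
//      return isa<GetElementPtrInst>(I) || isa<BitCastInst>(I);
//    }
//  };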
/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if offset or size are unknown.
  bool offsetOrSizeAreUnknown() const {
    return getOffset() == OffsetAndSize::Unknown ||
           getSize() == OffsetAndSize::Unknown;
  }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (offsetOrSizeAreUnknown() || OAS.offsetOrSizeAreUnknown())
      return true;

    // Check if one offset point is in the other half-open interval
    // [offset, offset + size).
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offset or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};
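// Two concrete data points for mayOverlap: {Offset=0, Size=8} and
// {Offset=4, Size=8} overlap (4 < 0 + 8 and 4 + 8 > 0), whereas
// {Offset=0, Size=4} and {Offset=4, Size=4} do not, as the half-open
// intervals [0, 4) and [4, 8) merely touch. Unknown offsets or sizes
// conservatively overlap everything.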
/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }
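  // To make the bin translation above concrete: if the callee recorded an
  // access in its bin {Offset=8, Size=4} for an argument, and the call site
  // passes that argument as the base pointer plus a known CallArgOffset of 16,
  // the access is re-binned here as {Offset=24, Size=4}; an unknown
  // CallArgOffset collapses everything into the unknown bin instead.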
  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!CE->isGEPWithNoNotionalOverIndexing()) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(
                           CurPtr->getType()->getPointerElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses.
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << " - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << " --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << " - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
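  // Each map entry pairs one potential return value with the set of return
  // instructions that may return it; the predicate must hold for every such
  // pair.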
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimpleRetVal =
        A.getAssumedSimplified(V, *this, UsedAssumedInformation);
    if (!SimpleRetVal.hasValue())
      return true;
    if (!SimpleRetVal.getValue())
      return false;
    Value *RetVal = *SimpleRetVal;
    assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[RetVal].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  bool UsedAssumedInformation = false;
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is neither unordered
  /// nor monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function specific for intrinsics which are potentially volatile.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg.
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      if (isNoSyncIntrinsic(&I))
        return true;

      const auto &NoSyncAA = A.getAAFor<AANoSync>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      return NoSyncAA.isAssumedNoSync();
    }

    if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // Non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
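/// Example (illustrative): a call to a callee whose only memory operations
/// are relaxed atomics, e.g. `load atomic i32, i32* %p monotonic, align 4`,
/// can be deduced nosync here once the callee's AANoSync reflects that.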
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for an argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};

/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do
  // not like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
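    // Harvest what the callee argument position already _knows_: a known
    // nonnull flag folds into IsNonNull and the known dereferenceable byte
    // count is returned directly.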
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfAccessPointerOperand(A, QueryingAA, I, Offset, DL);
  if (Base) {
    if (Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  /// Corner case when an offset is 0.
  Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
                                              /*AllowNonInbounds*/ true);
  if (Base) {
    if (Offset == 0 && Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
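  /// The update walks the underlying value via genericValueTraversal (through
  /// PHIs, selects, and casts) and intersects the AANonNull states of the
  /// simplified values it finds; for the unstripped value itself it falls
  /// back to isKnownNonZero.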
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};

/// ------------------------ No-Recurse Attributes ----------------------------

struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
"norecurse" : "may-recurse"; 2342 } 2343 }; 2344 2345 struct AANoRecurseFunction final : AANoRecurseImpl { 2346 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 2347 : AANoRecurseImpl(IRP, A) {} 2348 2349 /// See AbstractAttribute::initialize(...). 2350 void initialize(Attributor &A) override { 2351 AANoRecurseImpl::initialize(A); 2352 // TODO: We should build a call graph ourselves to enable this in the module 2353 // pass as well. 2354 if (const Function *F = getAnchorScope()) 2355 if (A.getInfoCache().getSccSize(*F) != 1) 2356 indicatePessimisticFixpoint(); 2357 } 2358 2359 /// See AbstractAttribute::updateImpl(...). 2360 ChangeStatus updateImpl(Attributor &A) override { 2361 2362 // If all live call sites are known to be no-recurse, we are as well. 2363 auto CallSitePred = [&](AbstractCallSite ACS) { 2364 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2365 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2366 DepClassTy::NONE); 2367 return NoRecurseAA.isKnownNoRecurse(); 2368 }; 2369 bool AllCallSitesKnown; 2370 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { 2371 // If we know all call sites and all are known no-recurse, we are done. 2372 // If all known call sites, which might not be all that exist, are known 2373 // to be no-recurse, we are not done but we can continue to assume 2374 // no-recurse. If one of the call sites we have not visited will become 2375 // live, another update is triggered. 2376 if (AllCallSitesKnown) 2377 indicateOptimisticFixpoint(); 2378 return ChangeStatus::UNCHANGED; 2379 } 2380 2381 // If the above check does not hold anymore we look at the calls. 2382 auto CheckForNoRecurse = [&](Instruction &I) { 2383 const auto &CB = cast<CallBase>(I); 2384 if (CB.hasFnAttr(Attribute::NoRecurse)) 2385 return true; 2386 2387 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2388 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 2389 if (!NoRecurseAA.isAssumedNoRecurse()) 2390 return false; 2391 2392 // Recursion to the same function 2393 if (CB.getCalledFunction() == getAnchorScope()) 2394 return false; 2395 2396 return true; 2397 }; 2398 2399 bool UsedAssumedInformation = false; 2400 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this, 2401 UsedAssumedInformation)) 2402 return indicatePessimisticFixpoint(); 2403 return ChangeStatus::UNCHANGED; 2404 } 2405 2406 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 2407 }; 2408 2409 /// NoRecurse attribute deduction for a call sites. 2410 struct AANoRecurseCallSite final : AANoRecurseImpl { 2411 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 2412 : AANoRecurseImpl(IRP, A) {} 2413 2414 /// See AbstractAttribute::initialize(...). 2415 void initialize(Attributor &A) override { 2416 AANoRecurseImpl::initialize(A); 2417 Function *F = getAssociatedFunction(); 2418 if (!F || F->isDeclaration()) 2419 indicatePessimisticFixpoint(); 2420 } 2421 2422 /// See AbstractAttribute::updateImpl(...). 2423 ChangeStatus updateImpl(Attributor &A) override { 2424 // TODO: Once we have call site specific value information we can provide 2425 // call site specific liveness information and then it makes 2426 // sense to specialize attributes for call sites arguments instead of 2427 // redirecting requests to the callee argument. 
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// -------------------- Undefined-Behavior Attributes ------------------------

struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  // We look for UB in memory accesses through a pointer, in conditional
  // branches, at call sites, and in return instructions.
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef now states that volatile stores are not UB, so skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
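      // E.g., `br i1 undef, label %t, label %f` becomes known UB here, while
      // a condition that is only assumed undef is left unclassified for now.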
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the value
        //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to null pointer where known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB or not.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
          // We also ensure the return position is not "assumed dead"
          // because the returned value was then potentially simplified to
          // `undef` in AAReturnedValues without removing the `noundef`
          // attribute yet.

          // When the returned position has the noundef attribute, UB occurs
          // in the following cases:
          //   (1) Returned value is known to be undef.
          //   (2) The value is known to be a null pointer and the returned
          //       position has the nonnull attribute (because the returned
          //       value is then poison).
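          // Illustrative IR for case (2):
          //   define nonnull noundef i32* @f() { ret i32* null }
          // The `ret` is known UB because the returned null value is poison
          // at a noundef position.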
          bool FoundUB = false;
          if (isa<UndefValue>(V)) {
            FoundUB = true;
          } else {
            if (isa<ConstantPointerNull>(V)) {
              auto &NonNullAA = A.getAAFor<AANonNull>(
                  *this, IRPosition::returned(*getAnchorScope()),
                  DepClassTy::NONE);
              if (NonNullAA.isKnownNonNull())
                FoundUB = true;
            }
          }

          if (FoundUB)
            for (ReturnInst *RI : RetInsts)
              KnownUBInsts.insert(RI);
          return true;
        };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
                                                    *this);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // is to ensure that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction: AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during updates in which, if we're processing an
  // instruction \p I that depends on a value \p V, one of the following has
  // to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle for which we
// do not know whether it is bounded. Loops with a maximum trip count are
// considered bounded; any other cycle is not.
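// For instance (illustrative): `while (true) ++i;` has no computable max trip
// count and counts as unbounded, while `for (int i = 0; i < 8; ++i)` gets a
// constant max trip count from SCEV and counts as bounded.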
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // assume any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd();
         ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function which
    // might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    const auto &MemAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (!MemAA.isAssumedReadOnly())
      return false;
    if (KnownOnly && !MemAA.isKnownReadOnly())
      return false;
    if (!MemAA.isKnownReadOnly())
      A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
"willreturn" : "may-noreturn"; 2861 } 2862 }; 2863 2864 struct AAWillReturnFunction final : AAWillReturnImpl { 2865 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2866 : AAWillReturnImpl(IRP, A) {} 2867 2868 /// See AbstractAttribute::initialize(...). 2869 void initialize(Attributor &A) override { 2870 AAWillReturnImpl::initialize(A); 2871 2872 Function *F = getAnchorScope(); 2873 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 2874 indicatePessimisticFixpoint(); 2875 } 2876 2877 /// See AbstractAttribute::trackStatistics() 2878 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2879 }; 2880 2881 /// WillReturn attribute deduction for a call sites. 2882 struct AAWillReturnCallSite final : AAWillReturnImpl { 2883 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2884 : AAWillReturnImpl(IRP, A) {} 2885 2886 /// See AbstractAttribute::initialize(...). 2887 void initialize(Attributor &A) override { 2888 AAWillReturnImpl::initialize(A); 2889 Function *F = getAssociatedFunction(); 2890 if (!F || !A.isFunctionIPOAmendable(*F)) 2891 indicatePessimisticFixpoint(); 2892 } 2893 2894 /// See AbstractAttribute::updateImpl(...). 2895 ChangeStatus updateImpl(Attributor &A) override { 2896 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2897 return ChangeStatus::UNCHANGED; 2898 2899 // TODO: Once we have call site specific value information we can provide 2900 // call site specific liveness information and then it makes 2901 // sense to specialize attributes for call sites arguments instead of 2902 // redirecting requests to the callee argument. 2903 Function *F = getAssociatedFunction(); 2904 const IRPosition &FnPos = IRPosition::function(*F); 2905 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 2906 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2907 } 2908 2909 /// See AbstractAttribute::trackStatistics() 2910 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2911 }; 2912 2913 /// -------------------AAReachability Attribute-------------------------- 2914 2915 struct AAReachabilityImpl : AAReachability { 2916 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2917 : AAReachability(IRP, A) {} 2918 2919 const std::string getAsStr() const override { 2920 // TODO: Return the number of reachable queries. 2921 return "reachable"; 2922 } 2923 2924 /// See AbstractAttribute::updateImpl(...). 2925 ChangeStatus updateImpl(Attributor &A) override { 2926 return ChangeStatus::UNCHANGED; 2927 } 2928 }; 2929 2930 struct AAReachabilityFunction final : public AAReachabilityImpl { 2931 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2932 : AAReachabilityImpl(IRP, A) {} 2933 2934 /// See AbstractAttribute::trackStatistics() 2935 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2936 }; 2937 2938 /// ------------------------ NoAlias Argument Attribute ------------------------ 2939 2940 struct AANoAliasImpl : AANoAlias { 2941 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2942 assert(getAssociatedType()->isPointerTy() && 2943 "Noalias is a pointer attribute"); 2944 } 2945 2946 const std::string getAsStr() const override { 2947 return getAssumed() ? "noalias" : "may-alias"; 2948 } 2949 }; 2950 2951 /// NoAlias attribute for a floating value. 
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // See callsite argument attribute and callee argument attribute.
    if (hasAttr({Attribute::ByVal}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // We have to make sure no-alias on the argument does not break
    // synchronization when this is a callback argument, see also [1] below.
    // If synchronization cannot be affected, we delegate to the base
    // updateImpl function, otherwise we give up for now.

    // If the function is no-sync, no-alias cannot break synchronization.
    const auto &NoSyncAA =
        A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
                             DepClassTy::OPTIONAL);
    if (NoSyncAA.isAssumedNoSync())
      return Base::updateImpl(A);

    // If the argument is read-only, no-alias cannot break synchronization.
    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, getIRPosition(), DepClassTy::OPTIONAL);
    if (MemBehaviorAA.isAssumedReadOnly())
      return Base::updateImpl(A);

    // If the argument is never passed through callbacks, no-alias cannot break
    // synchronization.
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(
            [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
            true, AllCallSitesKnown))
      return Base::updateImpl(A);

    // TODO: add no-alias but make sure it doesn't break synchronization by
    //       introducing fake uses. See:
    // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
    //     International Workshop on OpenMP 2018,
    //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias\n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i)   Associated value is assumed to be noalias in the definition.
    // (ii)  Associated value is assumed to be no-capture in all the uses
    //       possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.
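    // Sketch (illustrative): for `%p = call noalias i8* @malloc(i64 8)`
    // passed as `call void @g(i8* %p)`, (i) holds at the definition of %p,
    // (ii) holds if %p is not captured before the call to @g, and (iii)
    // holds if no other pointer argument of the call may alias %p.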
3129 3130 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 3131 if (!AssociatedValueIsNoAliasAtDef) { 3132 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 3133 << " is not no-alias at the definition\n"); 3134 return false; 3135 } 3136 3137 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 3138 3139 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3140 const Function *ScopeFn = VIRP.getAnchorScope(); 3141 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); 3142 // Check whether the value is captured in the scope using AANoCapture. 3143 // Look at CFG and check only uses possibly executed before this 3144 // callsite. 3145 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 3146 Instruction *UserI = cast<Instruction>(U.getUser()); 3147 3148 // If UserI is the current instruction and there is a single potential use of 3149 // the value in UserI we allow the use. 3150 // TODO: We should inspect the operands and allow those that cannot alias 3151 // with the value. 3152 if (UserI == getCtxI() && UserI->getNumOperands() == 1) 3153 return true; 3154 3155 if (ScopeFn) { 3156 const auto &ReachabilityAA = A.getAAFor<AAReachability>( 3157 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL); 3158 3159 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 3160 return true; 3161 3162 if (auto *CB = dyn_cast<CallBase>(UserI)) { 3163 if (CB->isArgOperand(&U)) { 3164 3165 unsigned ArgNo = CB->getArgOperandNo(&U); 3166 3167 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 3168 *this, IRPosition::callsite_argument(*CB, ArgNo), 3169 DepClassTy::OPTIONAL); 3170 3171 if (NoCaptureAA.isAssumedNoCapture()) 3172 return true; 3173 } 3174 } 3175 } 3176 3177 // For cases which can potentially have more users, follow the uses. 3178 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 3179 isa<SelectInst>(U)) { 3180 Follow = true; 3181 return true; 3182 } 3183 3184 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 3185 return false; 3186 }; 3187 3188 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 3189 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 3190 LLVM_DEBUG( 3191 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 3192 << " cannot be noalias as it is potentially captured\n"); 3193 return false; 3194 } 3195 } 3196 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 3197 3198 // Check there is no other pointer argument which could alias with the 3199 // value passed at this call site. 3200 // TODO: AbstractCallSite 3201 const auto &CB = cast<CallBase>(getAnchorValue()); 3202 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) 3203 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 3204 return false; 3205 3206 return true; 3207 } 3208 3209 /// See AbstractAttribute::updateImpl(...). 3210 ChangeStatus updateImpl(Attributor &A) override { 3211 // If the argument is readnone we are done as there are no accesses via the 3212 // argument.
3213 auto &MemBehaviorAA = 3214 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 3215 if (MemBehaviorAA.isAssumedReadNone()) { 3216 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3217 return ChangeStatus::UNCHANGED; 3218 } 3219 3220 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3221 const auto &NoAliasAA = 3222 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); 3223 3224 AAResults *AAR = nullptr; 3225 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 3226 NoAliasAA)) { 3227 LLVM_DEBUG( 3228 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 3229 return ChangeStatus::UNCHANGED; 3230 } 3231 3232 return indicatePessimisticFixpoint(); 3233 } 3234 3235 /// See AbstractAttribute::trackStatistics() 3236 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 3237 }; 3238 3239 /// NoAlias attribute for function return value. 3240 struct AANoAliasReturned final : AANoAliasImpl { 3241 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 3242 : AANoAliasImpl(IRP, A) {} 3243 3244 /// See AbstractAttribute::initialize(...). 3245 void initialize(Attributor &A) override { 3246 AANoAliasImpl::initialize(A); 3247 Function *F = getAssociatedFunction(); 3248 if (!F || F->isDeclaration()) 3249 indicatePessimisticFixpoint(); 3250 } 3251 3252 /// See AbstractAttribute::updateImpl(...). 3253 ChangeStatus updateImpl(Attributor &A) override { 3254 3255 auto CheckReturnValue = [&](Value &RV) -> bool { 3256 if (Constant *C = dyn_cast<Constant>(&RV)) 3257 if (C->isNullValue() || isa<UndefValue>(C)) 3258 return true; 3259 3260 /// For now, we can only deduce noalias if the returned value is a call base. 3261 /// FIXME: add more support. 3262 if (!isa<CallBase>(&RV)) 3263 return false; 3264 3265 const IRPosition &RVPos = IRPosition::value(RV); 3266 const auto &NoAliasAA = 3267 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); 3268 if (!NoAliasAA.isAssumedNoAlias()) 3269 return false; 3270 3271 const auto &NoCaptureAA = 3272 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); 3273 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 3274 }; 3275 3276 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 3277 return indicatePessimisticFixpoint(); 3278 3279 return ChangeStatus::UNCHANGED; 3280 } 3281 3282 /// See AbstractAttribute::trackStatistics() 3283 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 3284 }; 3285 3286 /// NoAlias attribute deduction for a call site return value. 3287 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 3288 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 3289 : AANoAliasImpl(IRP, A) {} 3290 3291 /// See AbstractAttribute::initialize(...). 3292 void initialize(Attributor &A) override { 3293 AANoAliasImpl::initialize(A); 3294 Function *F = getAssociatedFunction(); 3295 if (!F || F->isDeclaration()) 3296 indicatePessimisticFixpoint(); 3297 } 3298 3299 /// See AbstractAttribute::updateImpl(...). 3300 ChangeStatus updateImpl(Attributor &A) override { 3301 // TODO: Once we have call site specific value information we can provide 3302 // call site specific liveness information and then it makes 3303 // sense to specialize attributes for call site arguments instead of 3304 // redirecting requests to the callee argument.
3305 Function *F = getAssociatedFunction(); 3306 const IRPosition &FnPos = IRPosition::returned(*F); 3307 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3308 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3309 } 3310 3311 /// See AbstractAttribute::trackStatistics() 3312 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3313 }; 3314 3315 /// -------------------AAIsDead Function Attribute----------------------- 3316 3317 struct AAIsDeadValueImpl : public AAIsDead { 3318 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3319 3320 /// See AAIsDead::isAssumedDead(). 3321 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3322 3323 /// See AAIsDead::isKnownDead(). 3324 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3325 3326 /// See AAIsDead::isAssumedDead(BasicBlock *). 3327 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3328 3329 /// See AAIsDead::isKnownDead(BasicBlock *). 3330 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3331 3332 /// See AAIsDead::isAssumedDead(Instruction *I). 3333 bool isAssumedDead(const Instruction *I) const override { 3334 return I == getCtxI() && isAssumedDead(); 3335 } 3336 3337 /// See AAIsDead::isKnownDead(Instruction *I). 3338 bool isKnownDead(const Instruction *I) const override { 3339 return isAssumedDead(I) && isKnownDead(); 3340 } 3341 3342 /// See AbstractAttribute::getAsStr(). 3343 const std::string getAsStr() const override { 3344 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3345 } 3346 3347 /// Check if all uses are assumed dead. 3348 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3349 // Callers might not check the type; void values have no uses. 3350 if (V.getType()->isVoidTy()) 3351 return true; 3352 3353 // If we replace a value with a constant there are no uses left afterwards. 3354 if (!isa<Constant>(V)) { 3355 bool UsedAssumedInformation = false; 3356 Optional<Constant *> C = 3357 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3358 if (!C.hasValue() || *C) 3359 return true; 3360 } 3361 3362 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3363 // Explicitly set the dependence class to required because we want a long 3364 // chain of N dependent instructions to be considered live as soon as one is 3365 // without going through N update cycles. This is not required for 3366 // correctness. 3367 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3368 DepClassTy::REQUIRED); 3369 } 3370 3371 /// Determine if \p I is assumed to be side-effect free.
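/// For non-intrinsic calls this boils down to the callee being assumed
/// nounwind and at most read-only; e.g., an unused
///   %x = call i32 @f(i32 %a)
/// where @f is deduced readonly and nounwind is treated as side-effect free
/// (a hypothetical example, not tied to a specific test).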
3372 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 3373 if (!I || wouldInstructionBeTriviallyDead(I)) 3374 return true; 3375 3376 auto *CB = dyn_cast<CallBase>(I); 3377 if (!CB || isa<IntrinsicInst>(CB)) 3378 return false; 3379 3380 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 3381 const auto &NoUnwindAA = 3382 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE); 3383 if (!NoUnwindAA.isAssumedNoUnwind()) 3384 return false; 3385 if (!NoUnwindAA.isKnownNoUnwind()) 3386 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 3387 3388 const auto &MemBehaviorAA = 3389 A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE); 3390 if (MemBehaviorAA.isAssumedReadOnly()) { 3391 if (!MemBehaviorAA.isKnownReadOnly()) 3392 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3393 return true; 3394 } 3395 return false; 3396 } 3397 }; 3398 3399 struct AAIsDeadFloating : public AAIsDeadValueImpl { 3400 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 3401 : AAIsDeadValueImpl(IRP, A) {} 3402 3403 /// See AbstractAttribute::initialize(...). 3404 void initialize(Attributor &A) override { 3405 if (isa<UndefValue>(getAssociatedValue())) { 3406 indicatePessimisticFixpoint(); 3407 return; 3408 } 3409 3410 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3411 if (!isAssumedSideEffectFree(A, I)) { 3412 if (!isa_and_nonnull<StoreInst>(I)) 3413 indicatePessimisticFixpoint(); 3414 else 3415 removeAssumedBits(HAS_NO_EFFECT); 3416 } 3417 } 3418 3419 bool isDeadStore(Attributor &A, StoreInst &SI) { 3420 // The language reference states that volatile stores are not UB/dead; skip them. 3421 if (SI.isVolatile()) 3422 return false; 3423 3424 bool UsedAssumedInformation = false; 3425 SmallSetVector<Value *, 4> PotentialCopies; 3426 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this, 3427 UsedAssumedInformation)) 3428 return false; 3429 return llvm::all_of(PotentialCopies, [&](Value *V) { 3430 return A.isAssumedDead(IRPosition::value(*V), this, nullptr, 3431 UsedAssumedInformation); 3432 }); 3433 } 3434 3435 /// See AbstractAttribute::updateImpl(...). 3436 ChangeStatus updateImpl(Attributor &A) override { 3437 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3438 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3439 if (!isDeadStore(A, *SI)) 3440 return indicatePessimisticFixpoint(); 3441 } else { 3442 if (!isAssumedSideEffectFree(A, I)) 3443 return indicatePessimisticFixpoint(); 3444 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3445 return indicatePessimisticFixpoint(); 3446 } 3447 return ChangeStatus::UNCHANGED; 3448 } 3449 3450 /// See AbstractAttribute::manifest(...). 3451 ChangeStatus manifest(Attributor &A) override { 3452 Value &V = getAssociatedValue(); 3453 if (auto *I = dyn_cast<Instruction>(&V)) { 3454 // If we get here we basically know the users are all dead. We check 3455 // isAssumedSideEffectFree again because it might be that only the users 3456 // are dead but the instruction (= a call) is still needed.
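// E.g., the dead result of a readonly, nounwind (non-invoke) call can be
// removed together with the call, whereas a call that may write memory has
// to stay even though its result is unused; in the latter case only the
// value is replaced by undef below. (An illustrative distinction, not an
// exhaustive case list.)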
3458 if (isa<StoreInst>(I) || 3459 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) { 3460 A.deleteAfterManifest(*I); 3461 return ChangeStatus::CHANGED; 3462 } 3463 } 3464 if (V.use_empty()) 3465 return ChangeStatus::UNCHANGED; 3466 3467 bool UsedAssumedInformation = false; 3468 Optional<Constant *> C = 3469 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3470 if (C.hasValue() && C.getValue()) 3471 return ChangeStatus::UNCHANGED; 3472 3473 // Replace the value with undef as it is dead but keep droppable uses around 3474 // as they provide information we don't want to give up on just yet. 3475 UndefValue &UV = *UndefValue::get(V.getType()); 3476 bool AnyChange = 3477 A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false); 3478 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3479 } 3480 3481 /// See AbstractAttribute::trackStatistics() 3482 void trackStatistics() const override { 3483 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 3484 } 3485 }; 3486 3487 struct AAIsDeadArgument : public AAIsDeadFloating { 3488 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 3489 : AAIsDeadFloating(IRP, A) {} 3490 3491 /// See AbstractAttribute::initialize(...). 3492 void initialize(Attributor &A) override { 3493 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 3494 indicatePessimisticFixpoint(); 3495 } 3496 3497 /// See AbstractAttribute::manifest(...). 3498 ChangeStatus manifest(Attributor &A) override { 3499 ChangeStatus Changed = AAIsDeadFloating::manifest(A); 3500 Argument &Arg = *getAssociatedArgument(); 3501 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 3502 if (A.registerFunctionSignatureRewrite( 3503 Arg, /* ReplacementTypes */ {}, 3504 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 3505 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 3506 Arg.dropDroppableUses(); 3507 return ChangeStatus::CHANGED; 3508 } 3509 return Changed; 3510 } 3511 3512 /// See AbstractAttribute::trackStatistics() 3513 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 3514 }; 3515 3516 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 3517 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 3518 : AAIsDeadValueImpl(IRP, A) {} 3519 3520 /// See AbstractAttribute::initialize(...). 3521 void initialize(Attributor &A) override { 3522 if (isa<UndefValue>(getAssociatedValue())) 3523 indicatePessimisticFixpoint(); 3524 } 3525 3526 /// See AbstractAttribute::updateImpl(...). 3527 ChangeStatus updateImpl(Attributor &A) override { 3528 // TODO: Once we have call site specific value information we can provide 3529 // call site specific liveness information and then it makes 3530 // sense to specialize attributes for call site arguments instead of 3531 // redirecting requests to the callee argument. 3532 Argument *Arg = getAssociatedArgument(); 3533 if (!Arg) 3534 return indicatePessimisticFixpoint(); 3535 const IRPosition &ArgPos = IRPosition::argument(*Arg); 3536 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED); 3537 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 3538 } 3539 3540 /// See AbstractAttribute::manifest(...).
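/// A dead call site argument is replaced by undef; e.g., a hypothetical
/// `call void @f(i32 %v)` whose corresponding callee argument is dead
/// becomes `call void @f(i32 undef)`, dropping the use of %v.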
3541 ChangeStatus manifest(Attributor &A) override { 3542 CallBase &CB = cast<CallBase>(getAnchorValue()); 3543 Use &U = CB.getArgOperandUse(getCallSiteArgNo()); 3544 assert(!isa<UndefValue>(U.get()) && 3545 "Expected undef values to be filtered out!"); 3546 UndefValue &UV = *UndefValue::get(U->getType()); 3547 if (A.changeUseAfterManifest(U, UV)) 3548 return ChangeStatus::CHANGED; 3549 return ChangeStatus::UNCHANGED; 3550 } 3551 3552 /// See AbstractAttribute::trackStatistics() 3553 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 3554 }; 3555 3556 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 3557 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 3558 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 3559 3560 /// See AAIsDead::isAssumedDead(). 3561 bool isAssumedDead() const override { 3562 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3563 } 3564 3565 /// See AbstractAttribute::initialize(...). 3566 void initialize(Attributor &A) override { 3567 if (isa<UndefValue>(getAssociatedValue())) { 3568 indicatePessimisticFixpoint(); 3569 return; 3570 } 3571 3572 // We track this separately as a secondary state. 3573 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3574 } 3575 3576 /// See AbstractAttribute::updateImpl(...). 3577 ChangeStatus updateImpl(Attributor &A) override { 3578 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3579 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3580 IsAssumedSideEffectFree = false; 3581 Changed = ChangeStatus::CHANGED; 3582 } 3583 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3584 return indicatePessimisticFixpoint(); 3585 return Changed; 3586 } 3587 3588 /// See AbstractAttribute::trackStatistics() 3589 void trackStatistics() const override { 3590 if (IsAssumedSideEffectFree) 3591 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3592 else 3593 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3594 } 3595 3596 /// See AbstractAttribute::getAsStr(). 3597 const std::string getAsStr() const override { 3598 return isAssumedDead() 3599 ? "assumed-dead" 3600 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3601 } 3602 3603 private: 3604 bool IsAssumedSideEffectFree; 3605 }; 3606 3607 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3608 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3609 : AAIsDeadValueImpl(IRP, A) {} 3610 3611 /// See AbstractAttribute::updateImpl(...). 3612 ChangeStatus updateImpl(Attributor &A) override { 3613 3614 bool UsedAssumedInformation = false; 3615 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3616 {Instruction::Ret}, UsedAssumedInformation); 3617 3618 auto PredForCallSite = [&](AbstractCallSite ACS) { 3619 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3620 return false; 3621 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3622 }; 3623 3624 bool AllCallSitesKnown; 3625 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3626 AllCallSitesKnown)) 3627 return indicatePessimisticFixpoint(); 3628 3629 return ChangeStatus::UNCHANGED; 3630 } 3631 3632 /// See AbstractAttribute::manifest(...). 3633 ChangeStatus manifest(Attributor &A) override { 3634 // TODO: Rewrite the signature to return void? 
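    // Sketch of the effect: if all call sites ignore the result, every
    // `ret i32 %v` is rewritten to `ret i32 undef` via the
    // changeUseAfterManifest call below (the types here are illustrative).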
3635 bool AnyChange = false; 3636 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3637 auto RetInstPred = [&](Instruction &I) { 3638 ReturnInst &RI = cast<ReturnInst>(I); 3639 if (!isa<UndefValue>(RI.getReturnValue())) 3640 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3641 return true; 3642 }; 3643 bool UsedAssumedInformation = false; 3644 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3645 UsedAssumedInformation); 3646 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3647 } 3648 3649 /// See AbstractAttribute::trackStatistics() 3650 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3651 }; 3652 3653 struct AAIsDeadFunction : public AAIsDead { 3654 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3655 3656 /// See AbstractAttribute::initialize(...). 3657 void initialize(Attributor &A) override { 3658 const Function *F = getAnchorScope(); 3659 if (F && !F->isDeclaration()) { 3660 // We only want to compute liveness once. If the function is not part of 3661 // the SCC, skip it. 3662 if (A.isRunOn(*const_cast<Function *>(F))) { 3663 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3664 assumeLive(A, F->getEntryBlock()); 3665 } else { 3666 indicatePessimisticFixpoint(); 3667 } 3668 } 3669 } 3670 3671 /// See AbstractAttribute::getAsStr(). 3672 const std::string getAsStr() const override { 3673 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3674 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3675 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3676 std::to_string(KnownDeadEnds.size()) + "]"; 3677 } 3678 3679 /// See AbstractAttribute::manifest(...). 3680 ChangeStatus manifest(Attributor &A) override { 3681 assert(getState().isValidState() && 3682 "Attempted to manifest an invalid state!"); 3683 3684 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3685 Function &F = *getAnchorScope(); 3686 3687 if (AssumedLiveBlocks.empty()) { 3688 A.deleteAfterManifest(F); 3689 return ChangeStatus::CHANGED; 3690 } 3691 3692 // Flag to determine if we can change an invoke to a call assuming the 3693 // callee is nounwind. This is not possible if the personality of the 3694 // function allows catching asynchronous exceptions. 3695 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3696 3697 KnownDeadEnds.set_union(ToBeExploredFrom); 3698 for (const Instruction *DeadEndI : KnownDeadEnds) { 3699 auto *CB = dyn_cast<CallBase>(DeadEndI); 3700 if (!CB) 3701 continue; 3702 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3703 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3704 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3705 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3706 continue; 3707 3708 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3709 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3710 else 3711 A.changeToUnreachableAfterManifest( 3712 const_cast<Instruction *>(DeadEndI->getNextNode())); 3713 HasChanged = ChangeStatus::CHANGED; 3714 } 3715 3716 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3717 for (BasicBlock &BB : F) 3718 if (!AssumedLiveBlocks.count(&BB)) { 3719 A.deleteAfterManifest(BB); 3720 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3721 } 3722 3723 return HasChanged; 3724 } 3725 3726 /// See AbstractAttribute::updateImpl(...).
3727 ChangeStatus updateImpl(Attributor &A) override; 3728 3729 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3730 return !AssumedLiveEdges.count(std::make_pair(From, To)); 3731 } 3732 3733 /// See AbstractAttribute::trackStatistics() 3734 void trackStatistics() const override {} 3735 3736 /// Returns true if the function is assumed dead. 3737 bool isAssumedDead() const override { return false; } 3738 3739 /// See AAIsDead::isKnownDead(). 3740 bool isKnownDead() const override { return false; } 3741 3742 /// See AAIsDead::isAssumedDead(BasicBlock *). 3743 bool isAssumedDead(const BasicBlock *BB) const override { 3744 assert(BB->getParent() == getAnchorScope() && 3745 "BB must be in the same anchor scope function."); 3746 3747 if (!getAssumed()) 3748 return false; 3749 return !AssumedLiveBlocks.count(BB); 3750 } 3751 3752 /// See AAIsDead::isKnownDead(BasicBlock *). 3753 bool isKnownDead(const BasicBlock *BB) const override { 3754 return getKnown() && isAssumedDead(BB); 3755 } 3756 3757 /// See AAIsDead::isAssumedDead(Instruction *I). 3758 bool isAssumedDead(const Instruction *I) const override { 3759 assert(I->getParent()->getParent() == getAnchorScope() && 3760 "Instruction must be in the same anchor scope function."); 3761 3762 if (!getAssumed()) 3763 return false; 3764 3765 // If it is not in AssumedLiveBlocks then it is for sure dead. 3766 // Otherwise, it can still be after a noreturn call in a live block. 3767 if (!AssumedLiveBlocks.count(I->getParent())) 3768 return true; 3769 3770 // If it is not after a liveness barrier it is live. 3771 const Instruction *PrevI = I->getPrevNode(); 3772 while (PrevI) { 3773 if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI)) 3774 return true; 3775 PrevI = PrevI->getPrevNode(); 3776 } 3777 return false; 3778 } 3779 3780 /// See AAIsDead::isKnownDead(Instruction *I). 3781 bool isKnownDead(const Instruction *I) const override { 3782 return getKnown() && isAssumedDead(I); 3783 } 3784 3785 /// Assume \p BB is (partially) live now and indicate to the Attributor \p A 3786 /// that internal functions called from \p BB should now be looked at. 3787 bool assumeLive(Attributor &A, const BasicBlock &BB) { 3788 if (!AssumedLiveBlocks.insert(&BB).second) 3789 return false; 3790 3791 // We assume that all of BB is (probably) live now and if there are calls to 3792 // internal functions we will assume that those are now live as well. This 3793 // is a performance optimization for blocks with calls to a lot of internal 3794 // functions. It can however cause dead functions to be treated as live. 3795 for (const Instruction &I : BB) 3796 if (const auto *CB = dyn_cast<CallBase>(&I)) 3797 if (const Function *F = CB->getCalledFunction()) 3798 if (F->hasLocalLinkage()) 3799 A.markLiveInternalFunction(*F); 3800 return true; 3801 } 3802 3803 /// Collection of instructions that need to be explored again, e.g., we 3804 /// did assume they do not transfer control to (one of their) successors. 3805 SmallSetVector<const Instruction *, 8> ToBeExploredFrom; 3806 3807 /// Collection of instructions that are known to not transfer control. 3808 SmallSetVector<const Instruction *, 8> KnownDeadEnds; 3809 3810 /// Collection of all assumed live edges. 3811 DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges; 3812 3813 /// Collection of all assumed live BasicBlocks.
3814 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3815 }; 3816 3817 static bool 3818 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3819 AbstractAttribute &AA, 3820 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3821 const IRPosition &IPos = IRPosition::callsite_function(CB); 3822 3823 const auto &NoReturnAA = 3824 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL); 3825 if (NoReturnAA.isAssumedNoReturn()) 3826 return !NoReturnAA.isKnownNoReturn(); 3827 if (CB.isTerminator()) 3828 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3829 else 3830 AliveSuccessors.push_back(CB.getNextNode()); 3831 return false; 3832 } 3833 3834 static bool 3835 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3836 AbstractAttribute &AA, 3837 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3838 bool UsedAssumedInformation = 3839 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3840 3841 // First, determine if we can change an invoke to a call assuming the 3842 // callee is nounwind. This is not possible if the personality of the 3843 // function allows catching asynchronous exceptions. 3844 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3845 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3846 } else { 3847 const IRPosition &IPos = IRPosition::callsite_function(II); 3848 const auto &AANoUnw = 3849 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); 3850 if (AANoUnw.isAssumedNoUnwind()) { 3851 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3852 } else { 3853 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3854 } 3855 } 3856 return UsedAssumedInformation; 3857 } 3858 3859 static bool 3860 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3861 AbstractAttribute &AA, 3862 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3863 bool UsedAssumedInformation = false; 3864 if (BI.getNumSuccessors() == 1) { 3865 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3866 } else { 3867 Optional<Constant *> C = 3868 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); 3869 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 3870 // No value yet, assume both edges are dead. 3871 } else if (isa_and_nonnull<ConstantInt>(*C)) { 3872 const BasicBlock *SuccBB = 3873 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); 3874 AliveSuccessors.push_back(&SuccBB->front()); 3875 } else { 3876 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3877 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3878 UsedAssumedInformation = false; 3879 } 3880 } 3881 return UsedAssumedInformation; 3882 } 3883 3884 static bool 3885 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3886 AbstractAttribute &AA, 3887 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3888 bool UsedAssumedInformation = false; 3889 Optional<Constant *> C = 3890 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); 3891 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 3892 // No value yet, assume all edges are dead.
3893 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) { 3894 for (auto &CaseIt : SI.cases()) { 3895 if (CaseIt.getCaseValue() == C.getValue()) { 3896 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3897 return UsedAssumedInformation; 3898 } 3899 } 3900 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3901 return UsedAssumedInformation; 3902 } else { 3903 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3904 AliveSuccessors.push_back(&SuccBB->front()); 3905 } 3906 return UsedAssumedInformation; 3907 } 3908 3909 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3910 ChangeStatus Change = ChangeStatus::UNCHANGED; 3911 3912 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3913 << getAnchorScope()->size() << "] BBs and " 3914 << ToBeExploredFrom.size() << " exploration points and " 3915 << KnownDeadEnds.size() << " known dead ends\n"); 3916 3917 // Copy and clear the list of instructions we need to explore from. It is 3918 // refilled with instructions the next update has to look at. 3919 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3920 ToBeExploredFrom.end()); 3921 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3922 3923 SmallVector<const Instruction *, 8> AliveSuccessors; 3924 while (!Worklist.empty()) { 3925 const Instruction *I = Worklist.pop_back_val(); 3926 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3927 3928 // Fast forward for uninteresting instructions. We could look for UB here 3929 // though. 3930 while (!I->isTerminator() && !isa<CallBase>(I)) 3931 I = I->getNextNode(); 3932 3933 AliveSuccessors.clear(); 3934 3935 bool UsedAssumedInformation = false; 3936 switch (I->getOpcode()) { 3937 // TODO: look for (assumed) UB to backwards propagate "deadness". 
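    // E.g., for a hypothetical `br i1 true, label %then, label %else` the
    // branch handling below only adds the first instruction of %then to
    // AliveSuccessors, so %else remains assumed dead.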
3938 default: 3939 assert(I->isTerminator() && 3940 "Expected non-terminators to be handled already!"); 3941 for (const BasicBlock *SuccBB : successors(I->getParent())) 3942 AliveSuccessors.push_back(&SuccBB->front()); 3943 break; 3944 case Instruction::Call: 3945 UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I), 3946 *this, AliveSuccessors); 3947 break; 3948 case Instruction::Invoke: 3949 UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I), 3950 *this, AliveSuccessors); 3951 break; 3952 case Instruction::Br: 3953 UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I), 3954 *this, AliveSuccessors); 3955 break; 3956 case Instruction::Switch: 3957 UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I), 3958 *this, AliveSuccessors); 3959 break; 3960 } 3961 3962 if (UsedAssumedInformation) { 3963 NewToBeExploredFrom.insert(I); 3964 } else if (AliveSuccessors.empty() || 3965 (I->isTerminator() && 3966 AliveSuccessors.size() < I->getNumSuccessors())) { 3967 if (KnownDeadEnds.insert(I)) 3968 Change = ChangeStatus::CHANGED; 3969 } 3970 3971 LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: " 3972 << AliveSuccessors.size() << " UsedAssumedInformation: " 3973 << UsedAssumedInformation << "\n"); 3974 3975 for (const Instruction *AliveSuccessor : AliveSuccessors) { 3976 if (!I->isTerminator()) { 3977 assert(AliveSuccessors.size() == 1 && 3978 "Non-terminator expected to have a single successor!"); 3979 Worklist.push_back(AliveSuccessor); 3980 } else { 3981 // Record the assumed live edge. 3982 auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent()); 3983 if (AssumedLiveEdges.insert(Edge).second) 3984 Change = ChangeStatus::CHANGED; 3985 if (assumeLive(A, *AliveSuccessor->getParent())) 3986 Worklist.push_back(AliveSuccessor); 3987 } 3988 } 3989 } 3990 3991 // Check if the content of ToBeExploredFrom changed; ignore the order. 3992 if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() || 3993 llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) { 3994 return !ToBeExploredFrom.count(I); 3995 })) { 3996 Change = ChangeStatus::CHANGED; 3997 ToBeExploredFrom = std::move(NewToBeExploredFrom); 3998 } 3999 4000 // If we know everything is live there is no need to query for liveness. 4001 // Instead, indicating a pessimistic fixpoint will cause the state to be 4002 // "invalid" and all queries to be answered conservatively without lookups. 4003 // To be in this state we have to (1) finish the exploration, (2) not rule 4004 // any unreachable code dead, and (3) not discover any non-trivial dead 4005 // end. 4006 if (ToBeExploredFrom.empty() && 4007 getAnchorScope()->size() == AssumedLiveBlocks.size() && 4008 llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) { 4009 return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0; 4010 })) 4011 return indicatePessimisticFixpoint(); 4012 return Change; 4013 } 4014 4015 /// Liveness information for a call site. 4016 struct AAIsDeadCallSite final : AAIsDeadFunction { 4017 AAIsDeadCallSite(const IRPosition &IRP, Attributor &A) 4018 : AAIsDeadFunction(IRP, A) {} 4019 4020 /// See AbstractAttribute::initialize(...). 4021 void initialize(Attributor &A) override { 4022 // TODO: Once we have call site specific value information we can provide 4023 // call site specific liveness information and then it makes 4024 // sense to specialize attributes for call sites instead of 4025 // redirecting requests to the callee.
4026 llvm_unreachable("Abstract attributes for liveness are not " 4027 "supported for call sites yet!"); 4028 } 4029 4030 /// See AbstractAttribute::updateImpl(...). 4031 ChangeStatus updateImpl(Attributor &A) override { 4032 return indicatePessimisticFixpoint(); 4033 } 4034 4035 /// See AbstractAttribute::trackStatistics() 4036 void trackStatistics() const override {} 4037 }; 4038 4039 /// -------------------- Dereferenceable Argument Attribute -------------------- 4040 4041 struct AADereferenceableImpl : AADereferenceable { 4042 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 4043 : AADereferenceable(IRP, A) {} 4044 using StateType = DerefState; 4045 4046 /// See AbstractAttribute::initialize(...). 4047 void initialize(Attributor &A) override { 4048 SmallVector<Attribute, 4> Attrs; 4049 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 4050 Attrs, /* IgnoreSubsumingPositions */ false, &A); 4051 for (const Attribute &Attr : Attrs) 4052 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 4053 4054 const IRPosition &IRP = this->getIRPosition(); 4055 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE); 4056 4057 bool CanBeNull, CanBeFreed; 4058 takeKnownDerefBytesMaximum( 4059 IRP.getAssociatedValue().getPointerDereferenceableBytes( 4060 A.getDataLayout(), CanBeNull, CanBeFreed)); 4061 4062 bool IsFnInterface = IRP.isFnInterfaceKind(); 4063 Function *FnScope = IRP.getAnchorScope(); 4064 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 4065 indicatePessimisticFixpoint(); 4066 return; 4067 } 4068 4069 if (Instruction *CtxI = getCtxI()) 4070 followUsesInMBEC(*this, A, getState(), *CtxI); 4071 } 4072 4073 /// See AbstractAttribute::getState() 4074 /// { 4075 StateType &getState() override { return *this; } 4076 const StateType &getState() const override { return *this; } 4077 /// } 4078 4079 /// Helper function for collecting accessed bytes in must-be-executed-context 4080 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 4081 DerefState &State) { 4082 const Value *UseV = U->get(); 4083 if (!UseV->getType()->isPointerTy()) 4084 return; 4085 4086 Type *PtrTy = UseV->getType(); 4087 const DataLayout &DL = A.getDataLayout(); 4088 int64_t Offset; 4089 if (const Value *Base = getBasePointerOfAccessPointerOperand( 4090 I, Offset, DL, /*AllowNonInbounds*/ true)) { 4091 if (Base == &getAssociatedValue() && 4092 getPointerOperand(I, /* AllowVolatile */ false) == UseV) { 4093 uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType()); 4094 State.addAccessedBytes(Offset, Size); 4095 } 4096 } 4097 } 4098 4099 /// See followUsesInMBEC 4100 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4101 AADereferenceable::StateType &State) { 4102 bool IsNonNull = false; 4103 bool TrackUse = false; 4104 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4105 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4106 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4107 << " for instruction " << *I << "\n"); 4108 4109 addAccessedBytesForUse(A, U, I, State); 4110 State.takeKnownDerefBytesMaximum(DerefBytes); 4111 return TrackUse; 4112 } 4113 4114 /// See AbstractAttribute::manifest(...). 
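/// E.g., a pointer carrying dereferenceable_or_null(8) that is also deduced
/// nonnull manifests as dereferenceable(8); the weaker _or_null form is
/// dropped below (the byte count is chosen purely for illustration).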
4115 ChangeStatus manifest(Attributor &A) override { 4116 ChangeStatus Change = AADereferenceable::manifest(A); 4117 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 4118 removeAttrs({Attribute::DereferenceableOrNull}); 4119 return ChangeStatus::CHANGED; 4120 } 4121 return Change; 4122 } 4123 4124 void getDeducedAttributes(LLVMContext &Ctx, 4125 SmallVectorImpl<Attribute> &Attrs) const override { 4126 // TODO: Add *_globally support 4127 if (isAssumedNonNull()) 4128 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 4129 Ctx, getAssumedDereferenceableBytes())); 4130 else 4131 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 4132 Ctx, getAssumedDereferenceableBytes())); 4133 } 4134 4135 /// See AbstractAttribute::getAsStr(). 4136 const std::string getAsStr() const override { 4137 if (!getAssumedDereferenceableBytes()) 4138 return "unknown-dereferenceable"; 4139 return std::string("dereferenceable") + 4140 (isAssumedNonNull() ? "" : "_or_null") + 4141 (isAssumedGlobal() ? "_globally" : "") + "<" + 4142 std::to_string(getKnownDereferenceableBytes()) + "-" + 4143 std::to_string(getAssumedDereferenceableBytes()) + ">"; 4144 } 4145 }; 4146 4147 /// Dereferenceable attribute for a floating value. 4148 struct AADereferenceableFloating : AADereferenceableImpl { 4149 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 4150 : AADereferenceableImpl(IRP, A) {} 4151 4152 /// See AbstractAttribute::updateImpl(...). 4153 ChangeStatus updateImpl(Attributor &A) override { 4154 const DataLayout &DL = A.getDataLayout(); 4155 4156 auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T, 4157 bool Stripped) -> bool { 4158 unsigned IdxWidth = 4159 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 4160 APInt Offset(IdxWidth, 0); 4161 const Value *Base = 4162 stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false); 4163 4164 const auto &AA = A.getAAFor<AADereferenceable>( 4165 *this, IRPosition::value(*Base), DepClassTy::REQUIRED); 4166 int64_t DerefBytes = 0; 4167 if (!Stripped && this == &AA) { 4168 // Use IR information if we did not strip anything. 4169 // TODO: track globally. 4170 bool CanBeNull, CanBeFreed; 4171 DerefBytes = 4172 Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 4173 T.GlobalState.indicatePessimisticFixpoint(); 4174 } else { 4175 const DerefState &DS = AA.getState(); 4176 DerefBytes = DS.DerefBytesState.getAssumed(); 4177 T.GlobalState &= DS.GlobalState; 4178 } 4179 4180 // For now we do not try to "increase" dereferenceability due to negative 4181 // indices as we first have to come up with code to deal with loops and 4182 // with overflows of the dereferenceable bytes. 4183 int64_t OffsetSExt = Offset.getSExtValue(); 4184 if (OffsetSExt < 0) 4185 OffsetSExt = 0; 4186 4187 T.takeAssumedDerefBytesMinimum( 4188 std::max(int64_t(0), DerefBytes - OffsetSExt)); 4189 4190 if (this == &AA) { 4191 if (!Stripped) { 4192 // If nothing was stripped IR information is all we got. 4193 T.takeKnownDerefBytesMaximum( 4194 std::max(int64_t(0), DerefBytes - OffsetSExt)); 4195 T.indicatePessimisticFixpoint(); 4196 } else if (OffsetSExt > 0) { 4197 // If something was stripped but there is circular reasoning we look 4198 // at the offset. If it is positive we basically decrease the 4199 // dereferenceable bytes in a circular loop now, which will simply 4200 // drive them down to the known value in a very slow way that we 4201 // can accelerate by taking the pessimistic fixpoint right away.
4202 T.indicatePessimisticFixpoint(); 4203 } 4204 } 4205 4206 return T.isValidState(); 4207 }; 4208 4209 DerefState T; 4210 if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T, 4211 VisitValueCB, getCtxI())) 4212 return indicatePessimisticFixpoint(); 4213 4214 return clampStateAndIndicateChange(getState(), T); 4215 } 4216 4217 /// See AbstractAttribute::trackStatistics() 4218 void trackStatistics() const override { 4219 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 4220 } 4221 }; 4222 4223 /// Dereferenceable attribute for a return value. 4224 struct AADereferenceableReturned final 4225 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 4226 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 4227 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 4228 IRP, A) {} 4229 4230 /// See AbstractAttribute::trackStatistics() 4231 void trackStatistics() const override { 4232 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 4233 } 4234 }; 4235 4236 /// Dereferenceable attribute for an argument 4237 struct AADereferenceableArgument final 4238 : AAArgumentFromCallSiteArguments<AADereferenceable, 4239 AADereferenceableImpl> { 4240 using Base = 4241 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 4242 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 4243 : Base(IRP, A) {} 4244 4245 /// See AbstractAttribute::trackStatistics() 4246 void trackStatistics() const override { 4247 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 4248 } 4249 }; 4250 4251 /// Dereferenceable attribute for a call site argument. 4252 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 4253 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 4254 : AADereferenceableFloating(IRP, A) {} 4255 4256 /// See AbstractAttribute::trackStatistics() 4257 void trackStatistics() const override { 4258 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 4259 } 4260 }; 4261 4262 /// Dereferenceable attribute deduction for a call site return value. 4263 struct AADereferenceableCallSiteReturned final 4264 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4265 using Base = 4266 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4267 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4268 : Base(IRP, A) {} 4269 4270 /// See AbstractAttribute::trackStatistics() 4271 void trackStatistics() const override { 4272 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4273 } 4274 }; 4275 4276 // ------------------------ Align Argument Attribute ------------------------ 4277 4278 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4279 Value &AssociatedValue, const Use *U, 4280 const Instruction *I, bool &TrackUse) { 4281 // We need to follow common pointer manipulation uses to the accesses they 4282 // feed into. 4283 if (isa<CastInst>(I)) { 4284 // Follow all but ptr2int casts. 4285 TrackUse = !isa<PtrToIntInst>(I); 4286 return 0; 4287 } 4288 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4289 if (GEP->hasAllConstantIndices()) 4290 TrackUse = true; 4291 return 0; 4292 } 4293 4294 MaybeAlign MA; 4295 if (const auto *CB = dyn_cast<CallBase>(I)) { 4296 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4297 return 0; 4298 4299 unsigned ArgNo = CB->getArgOperandNo(U); 4300 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4301 // As long as we only use known information there is no need to track 4302 // dependences here. 
4303 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4304 MA = MaybeAlign(AlignAA.getKnownAlign()); 4305 } 4306 4307 const DataLayout &DL = A.getDataLayout(); 4308 const Value *UseV = U->get(); 4309 if (auto *SI = dyn_cast<StoreInst>(I)) { 4310 if (SI->getPointerOperand() == UseV) 4311 MA = SI->getAlign(); 4312 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4313 if (LI->getPointerOperand() == UseV) 4314 MA = LI->getAlign(); 4315 } 4316 4317 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4318 return 0; 4319 4320 unsigned Alignment = MA->value(); 4321 int64_t Offset; 4322 4323 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4324 if (Base == &AssociatedValue) { 4325 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4326 // So we can say that the maximum power of two which is a divisor of 4327 // gcd(Offset, Alignment) is an alignment. 4328 4329 uint32_t gcd = 4330 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4331 Alignment = llvm::PowerOf2Floor(gcd); 4332 } 4333 } 4334 4335 return Alignment; 4336 } 4337 4338 struct AAAlignImpl : AAAlign { 4339 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4340 4341 /// See AbstractAttribute::initialize(...). 4342 void initialize(Attributor &A) override { 4343 SmallVector<Attribute, 4> Attrs; 4344 getAttrs({Attribute::Alignment}, Attrs); 4345 for (const Attribute &Attr : Attrs) 4346 takeKnownMaximum(Attr.getValueAsInt()); 4347 4348 Value &V = getAssociatedValue(); 4349 // TODO: This is a HACK to keep getPointerAlignment from introducing a 4350 // ptr2int use of the function pointer. This was caused by D73131. We want 4351 // to avoid this for function pointers especially because we iterate 4352 // their uses and int2ptr is not handled. It is not a correctness 4353 // problem though! 4354 if (!V.getType()->getPointerElementType()->isFunctionTy()) 4355 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4356 4357 if (getIRPosition().isFnInterfaceKind() && 4358 (!getAnchorScope() || 4359 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4360 indicatePessimisticFixpoint(); 4361 return; 4362 } 4363 4364 if (Instruction *CtxI = getCtxI()) 4365 followUsesInMBEC(*this, A, getState(), *CtxI); 4366 } 4367 4368 /// See AbstractAttribute::manifest(...). 4369 ChangeStatus manifest(Attributor &A) override { 4370 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 4371 4372 // Check for users that allow alignment annotations.
4373 Value &AssociatedValue = getAssociatedValue(); 4374 for (const Use &U : AssociatedValue.uses()) { 4375 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 4376 if (SI->getPointerOperand() == &AssociatedValue) 4377 if (SI->getAlignment() < getAssumedAlign()) { 4378 STATS_DECLTRACK(AAAlign, Store, 4379 "Number of times alignment added to a store"); 4380 SI->setAlignment(Align(getAssumedAlign())); 4381 LoadStoreChanged = ChangeStatus::CHANGED; 4382 } 4383 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 4384 if (LI->getPointerOperand() == &AssociatedValue) 4385 if (LI->getAlignment() < getAssumedAlign()) { 4386 LI->setAlignment(Align(getAssumedAlign())); 4387 STATS_DECLTRACK(AAAlign, Load, 4388 "Number of times alignment added to a load"); 4389 LoadStoreChanged = ChangeStatus::CHANGED; 4390 } 4391 } 4392 } 4393 4394 ChangeStatus Changed = AAAlign::manifest(A); 4395 4396 Align InheritAlign = 4397 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4398 if (InheritAlign >= getAssumedAlign()) 4399 return LoadStoreChanged; 4400 return Changed | LoadStoreChanged; 4401 } 4402 4403 // TODO: Provide a helper to determine the implied ABI alignment and check, in 4404 // the existing manifest method and a new one for AAAlignImpl, against that 4405 // value to avoid making the alignment explicit if it did not improve. 4406 4407 /// See AbstractAttribute::getDeducedAttributes 4408 virtual void 4409 getDeducedAttributes(LLVMContext &Ctx, 4410 SmallVectorImpl<Attribute> &Attrs) const override { 4411 if (getAssumedAlign() > 1) 4412 Attrs.emplace_back( 4413 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 4414 } 4415 4416 /// See followUsesInMBEC 4417 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4418 AAAlign::StateType &State) { 4419 bool TrackUse = false; 4420 4421 unsigned KnownAlign = 4422 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 4423 State.takeKnownMaximum(KnownAlign); 4424 4425 return TrackUse; 4426 } 4427 4428 /// See AbstractAttribute::getAsStr(). 4429 const std::string getAsStr() const override { 4430 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) + 4431 "-" + std::to_string(getAssumedAlign()) + ">") 4432 : "unknown-align"; 4433 } 4434 }; 4435 4436 /// Align attribute for a floating value. 4437 struct AAAlignFloating : AAAlignImpl { 4438 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 4439 4440 /// See AbstractAttribute::updateImpl(...). 4441 ChangeStatus updateImpl(Attributor &A) override { 4442 const DataLayout &DL = A.getDataLayout(); 4443 4444 auto VisitValueCB = [&](Value &V, const Instruction *, 4445 AAAlign::StateType &T, bool Stripped) -> bool { 4446 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V), 4447 DepClassTy::REQUIRED); 4448 if (!Stripped && this == &AA) { 4449 int64_t Offset; 4450 unsigned Alignment = 1; 4451 if (const Value *Base = 4452 GetPointerBaseWithConstantOffset(&V, Offset, DL)) { 4453 Align PA = Base->getPointerAlignment(DL); 4454 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4455 // So we can say that the maximum power of two which is a divisor of 4456 // gcd(Offset, Alignment) is an alignment. 4457 4458 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), 4459 uint32_t(PA.value())); 4460 Alignment = llvm::PowerOf2Floor(gcd); 4461 } else { 4462 Alignment = V.getPointerAlignment(DL).value(); 4463 } 4464 // Use only IR information if we did not strip anything.
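        // E.g., for a base pointer aligned to 8 and a constant offset of 4,
        // gcd(4, 8) = 4, so the offset pointer is still known to be 4-byte
        // aligned (values chosen purely for illustration).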
4465 T.takeKnownMaximum(Alignment); 4466 T.indicatePessimisticFixpoint(); 4467 } else { 4468 // Use abstract attribute information. 4469 const AAAlign::StateType &DS = AA.getState(); 4470 T ^= DS; 4471 } 4472 return T.isValidState(); 4473 }; 4474 4475 StateType T; 4476 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 4477 VisitValueCB, getCtxI())) 4478 return indicatePessimisticFixpoint(); 4479 4480 // TODO: If we know we visited all incoming values, thus none are assumed 4481 // dead, we can take the known information from the state T. 4482 return clampStateAndIndicateChange(getState(), T); 4483 } 4484 4485 /// See AbstractAttribute::trackStatistics() 4486 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 4487 }; 4488 4489 /// Align attribute for function return value. 4490 struct AAAlignReturned final 4491 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 4492 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>; 4493 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 4494 4495 /// See AbstractAttribute::initialize(...). 4496 void initialize(Attributor &A) override { 4497 Base::initialize(A); 4498 Function *F = getAssociatedFunction(); 4499 if (!F || F->isDeclaration()) 4500 indicatePessimisticFixpoint(); 4501 } 4502 4503 /// See AbstractAttribute::trackStatistics() 4504 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 4505 }; 4506 4507 /// Align attribute for function argument. 4508 struct AAAlignArgument final 4509 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 4510 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 4511 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 4512 4513 /// See AbstractAttribute::manifest(...). 4514 ChangeStatus manifest(Attributor &A) override { 4515 // If the associated argument is involved in a must-tail call we give up 4516 // because we would need to keep the argument alignments of caller and 4517 // callee in-sync. It just does not seem worth the trouble right now. 4518 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 4519 return ChangeStatus::UNCHANGED; 4520 return Base::manifest(A); 4521 } 4522 4523 /// See AbstractAttribute::trackStatistics() 4524 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 4525 }; 4526 4527 struct AAAlignCallSiteArgument final : AAAlignFloating { 4528 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 4529 : AAAlignFloating(IRP, A) {} 4530 4531 /// See AbstractAttribute::manifest(...). 4532 ChangeStatus manifest(Attributor &A) override { 4533 // If the associated argument is involved in a must-tail call we give up 4534 // because we would need to keep the argument alignments of caller and 4535 // callee in-sync. It just does not seem worth the trouble right now. 4536 if (Argument *Arg = getAssociatedArgument()) 4537 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 4538 return ChangeStatus::UNCHANGED; 4539 ChangeStatus Changed = AAAlignImpl::manifest(A); 4540 Align InheritAlign = 4541 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4542 if (InheritAlign >= getAssumedAlign()) 4543 Changed = ChangeStatus::UNCHANGED; 4544 return Changed; 4545 } 4546 4547 /// See AbstractAttribute::updateImpl(Attributor &A).
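  /// The call site argument combines the generic floating value deduction
  /// with the known (not assumed) alignment of the associated callee
  /// argument, which is why no dependence on the argument's AA is tracked.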
4548 ChangeStatus updateImpl(Attributor &A) override { 4549 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 4550 if (Argument *Arg = getAssociatedArgument()) { 4551 // We only take known information from the argument 4552 // so we do not need to track a dependence. 4553 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 4554 *this, IRPosition::argument(*Arg), DepClassTy::NONE); 4555 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 4556 } 4557 return Changed; 4558 } 4559 4560 /// See AbstractAttribute::trackStatistics() 4561 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 4562 }; 4563 4564 /// Align attribute deduction for a call site return value. 4565 struct AAAlignCallSiteReturned final 4566 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 4567 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 4568 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 4569 : Base(IRP, A) {} 4570 4571 /// See AbstractAttribute::initialize(...). 4572 void initialize(Attributor &A) override { 4573 Base::initialize(A); 4574 Function *F = getAssociatedFunction(); 4575 if (!F || F->isDeclaration()) 4576 indicatePessimisticFixpoint(); 4577 } 4578 4579 /// See AbstractAttribute::trackStatistics() 4580 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 4581 }; 4582 4583 /// ------------------ Function No-Return Attribute ---------------------------- 4584 struct AANoReturnImpl : public AANoReturn { 4585 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 4586 4587 /// See AbstractAttribute::initialize(...). 4588 void initialize(Attributor &A) override { 4589 AANoReturn::initialize(A); 4590 Function *F = getAssociatedFunction(); 4591 if (!F || F->isDeclaration()) 4592 indicatePessimisticFixpoint(); 4593 } 4594 4595 /// See AbstractAttribute::getAsStr(). 4596 const std::string getAsStr() const override { 4597 return getAssumed() ? "noreturn" : "may-return"; 4598 } 4599 4600 /// See AbstractAttribute::updateImpl(Attributor &A). 4601 ChangeStatus updateImpl(Attributor &A) override { 4602 auto CheckForNoReturn = [](Instruction &) { return false; }; 4603 bool UsedAssumedInformation = false; 4604 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 4605 {(unsigned)Instruction::Ret}, 4606 UsedAssumedInformation)) 4607 return indicatePessimisticFixpoint(); 4608 return ChangeStatus::UNCHANGED; 4609 } 4610 }; 4611 4612 struct AANoReturnFunction final : AANoReturnImpl { 4613 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 4614 : AANoReturnImpl(IRP, A) {} 4615 4616 /// See AbstractAttribute::trackStatistics() 4617 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 4618 }; 4619 4620 /// NoReturn attribute deduction for a call site. 4621 struct AANoReturnCallSite final : AANoReturnImpl { 4622 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 4623 : AANoReturnImpl(IRP, A) {} 4624 4625 /// See AbstractAttribute::initialize(...). 4626 void initialize(Attributor &A) override { 4627 AANoReturnImpl::initialize(A); 4628 if (Function *F = getAssociatedFunction()) { 4629 const IRPosition &FnPos = IRPosition::function(*F); 4630 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); 4631 if (!FnAA.isAssumedNoReturn()) 4632 indicatePessimisticFixpoint(); 4633 } 4634 } 4635 4636 /// See AbstractAttribute::updateImpl(...).
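  /// The call site mirrors the noreturn state of the callee; e.g., a call
  /// to a function already deduced noreturn is itself treated as noreturn.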
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F =
        isArgumentPosition() ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isArgumentPosition()) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
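    // Illustrative example (not from the sources): a callee such as
    //   declare void @observe(i8* %p) readonly nounwind
    // with a void result can neither store the pointer, return it, nor
    // throw it, so NO_CAPTURE is added as known below.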
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory; it
    // can however return/throw state and the state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal
    // a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getCalleeArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and
  /// the search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not
  /// changed. If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently
  /// assumed "no-capture-maybe-returned", the user is added to the
  /// \p PotentialCopies set. All values in \p PotentialCopies are later
  /// tracked as well. For every explored use we decrement
  /// \p RemainingUsesToExplore. Once it reaches 0, the search is stopped
  /// with \p CapturedInMemory and \p CapturedInInteger conservatively set to
  /// true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallSetVector<Value *, 4> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // For stores we check if we can follow the value through memory or not.
    if (auto *SI = dyn_cast<StoreInst>(UInst)) {
      if (SI->isVolatile())
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      bool UsedAssumedInformation = false;
      if (!AA::getPotentialCopiesOfStoredValue(
              A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      // Not captured directly, potential copies will be checked.
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
        return isCapturedIn(/* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can
    // use it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA =
        A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(*CB);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed, so we
    // don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
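  /// E.g., for `%q = call i8* @wrap(i8* %p)` (a made-up callee) whose
  /// argument is assumed "no-capture-maybe-returned", the call is recorded
  /// so the uses of %q are later explored as potential copies of %p.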
4882 void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); } 4883 4884 /// See CaptureTracker::shouldExplore(...). 4885 bool shouldExplore(const Use *U) override { 4886 // Check liveness and ignore droppable users. 4887 bool UsedAssumedInformation = false; 4888 return !U->getUser()->isDroppable() && 4889 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA, 4890 UsedAssumedInformation); 4891 } 4892 4893 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4894 /// \p CapturedInRet, then return the appropriate value for use in the 4895 /// CaptureTracker::captured() interface. 4896 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4897 bool CapturedInRet) { 4898 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4899 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4900 if (CapturedInMem) 4901 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4902 if (CapturedInInt) 4903 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4904 if (CapturedInRet) 4905 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4906 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4907 } 4908 4909 private: 4910 /// The attributor providing in-flight abstract attributes. 4911 Attributor &A; 4912 4913 /// The abstract attribute currently updated. 4914 AANoCapture &NoCaptureAA; 4915 4916 /// The abstract liveness state. 4917 const AAIsDead &IsDeadAA; 4918 4919 /// The state currently updated. 4920 AANoCapture::StateType &State; 4921 4922 /// Set of potential copies of the tracked value. 4923 SmallSetVector<Value *, 4> &PotentialCopies; 4924 4925 /// Global counter to limit the number of explored uses. 4926 unsigned &RemainingUsesToExplore; 4927 }; 4928 4929 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4930 const IRPosition &IRP = getIRPosition(); 4931 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() 4932 : &IRP.getAssociatedValue(); 4933 if (!V) 4934 return indicatePessimisticFixpoint(); 4935 4936 const Function *F = 4937 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4938 assert(F && "Expected a function!"); 4939 const IRPosition &FnPos = IRPosition::function(*F); 4940 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE); 4941 4942 AANoCapture::StateType T; 4943 4944 // Readonly means we cannot capture through memory. 4945 const auto &FnMemAA = 4946 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE); 4947 if (FnMemAA.isAssumedReadOnly()) { 4948 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4949 if (FnMemAA.isKnownReadOnly()) 4950 addKnownBits(NOT_CAPTURED_IN_MEM); 4951 else 4952 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4953 } 4954 4955 // Make sure all returned values are different than the underlying value. 4956 // TODO: we could do this in a more sophisticated way inside 4957 // AAReturnedValues, e.g., track all values that escape through returns 4958 // directly somehow. 
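  // Illustrative example (hypothetical): in
  //   static int *id(int *p) { return p; }
  // the argument escapes through the return, so NOT_CAPTURED_IN_RET must not
  // be concluded from the returned values for that argument.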
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA =
      A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly update the assumed state.
  SmallSetVector<Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore =
      getDefaultMaxUsesToExploreForCaptureTracking();
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.insert(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (Argument *Arg = getAssociatedArgument())
      if (Arg->hasByValAttr())
        indicateOptimisticFixpoint();
    AANoCaptureImpl::initialize(A);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
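    // Until then the call site argument simply mirrors the callee argument,
    // e.g., the nocapture state deduced for `%p` in `define void @foo(i8* %p)`
    // is clamped onto each `call void @foo(i8* %x)` operand (illustrative IR).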
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    // Check what state the associated function can actually capture.
    determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};
} // namespace

/// ------------------ Value Simplify Attribute ----------------------------

bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add typecast support.
  SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
      SimplifiedAssociatedValue, Other, Ty);
  if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
    return false;

  LLVM_DEBUG({
    if (SimplifiedAssociatedValue.hasValue())
      dbgs() << "[ValueSimplify] is assumed to be "
             << **SimplifiedAssociatedValue << "\n";
    else
      dbgs() << "[ValueSimplify] is assumed to be <none>\n";
  });
  return true;
}

namespace {
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
      : AAValueSimplify(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    LLVM_DEBUG({
      errs() << "SAV: " << SimplifiedAssociatedValue << " ";
      if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
        errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
    });
    return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
                          : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    return SimplifiedAssociatedValue;
  }

  /// Return a value we can use as replacement for the associated one, or
  /// nullptr if we don't have one that makes sense.
  Value *getReplacementValue(Attributor &A) const {
    Value *NewV = SimplifiedAssociatedValue.hasValue()
                      ? SimplifiedAssociatedValue.getValue()
                      : UndefValue::get(getAssociatedType());
    if (!NewV)
      return nullptr;
    NewV = AA::getWithType(*NewV, *getAssociatedType());
    if (!NewV || NewV == &getAssociatedValue())
      return nullptr;
    const Instruction *CtxI = getCtxI();
    if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
      return nullptr;
    if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
      return nullptr;
    return NewV;
  }

  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified =
          A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Return true if a candidate was found, false otherwise.
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);

    if (!COpt.hasValue()) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = COpt.getValue()) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialValues>(A))
      return true;
    return false;
  }

  /// See AbstractAttribute::manifest(...).
5227 ChangeStatus manifest(Attributor &A) override { 5228 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5229 if (getAssociatedValue().user_empty()) 5230 return Changed; 5231 5232 if (auto *NewV = getReplacementValue(A)) { 5233 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> " 5234 << *NewV << " :: " << *this << "\n"); 5235 if (A.changeValueAfterManifest(getAssociatedValue(), *NewV)) 5236 Changed = ChangeStatus::CHANGED; 5237 } 5238 5239 return Changed | AAValueSimplify::manifest(A); 5240 } 5241 5242 /// See AbstractState::indicatePessimisticFixpoint(...). 5243 ChangeStatus indicatePessimisticFixpoint() override { 5244 SimplifiedAssociatedValue = &getAssociatedValue(); 5245 return AAValueSimplify::indicatePessimisticFixpoint(); 5246 } 5247 5248 static bool handleLoad(Attributor &A, const AbstractAttribute &AA, 5249 LoadInst &L, function_ref<bool(Value &)> Union) { 5250 auto UnionWrapper = [&](Value &V, Value &Obj) { 5251 if (isa<AllocaInst>(Obj)) 5252 return Union(V); 5253 if (!AA::isDynamicallyUnique(A, AA, V)) 5254 return false; 5255 if (!AA::isValidAtPosition(V, L, A.getInfoCache())) 5256 return false; 5257 return Union(V); 5258 }; 5259 5260 Value &Ptr = *L.getPointerOperand(); 5261 SmallVector<Value *, 8> Objects; 5262 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L)) 5263 return false; 5264 5265 const auto *TLI = 5266 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction()); 5267 for (Value *Obj : Objects) { 5268 LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n"); 5269 if (isa<UndefValue>(Obj)) 5270 continue; 5271 if (isa<ConstantPointerNull>(Obj)) { 5272 // A null pointer access can be undefined but any offset from null may 5273 // be OK. We do not try to optimize the latter. 5274 bool UsedAssumedInformation = false; 5275 if (!NullPointerIsDefined(L.getFunction(), 5276 Ptr.getType()->getPointerAddressSpace()) && 5277 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj) 5278 continue; 5279 return false; 5280 } 5281 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI); 5282 if (!InitialVal || !Union(*InitialVal)) 5283 return false; 5284 5285 LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store " 5286 "propagation, checking accesses next.\n"); 5287 5288 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) { 5289 LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n"); 5290 if (!Acc.isWrite()) 5291 return true; 5292 if (Acc.isWrittenValueYetUndetermined()) 5293 return true; 5294 Value *Content = Acc.getWrittenValue(); 5295 if (!Content) 5296 return false; 5297 Value *CastedContent = 5298 AA::getWithType(*Content, *AA.getAssociatedType()); 5299 if (!CastedContent) 5300 return false; 5301 if (IsExact) 5302 return UnionWrapper(*CastedContent, *Obj); 5303 if (auto *C = dyn_cast<Constant>(CastedContent)) 5304 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C)) 5305 return UnionWrapper(*CastedContent, *Obj); 5306 return false; 5307 }; 5308 5309 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj), 5310 DepClassTy::REQUIRED); 5311 if (!PI.forallInterferingAccesses(L, CheckAccess)) 5312 return false; 5313 } 5314 return true; 5315 } 5316 }; 5317 5318 struct AAValueSimplifyArgument final : AAValueSimplifyImpl { 5319 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) 5320 : AAValueSimplifyImpl(IRP, A) {} 5321 5322 void initialize(Attributor &A) override { 5323 AAValueSimplifyImpl::initialize(A); 5324 if (!getAnchorScope() || 
        getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
                 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();

    // FIXME: This is a hack to prevent us from propagating function pointers
    //        in the new pass manager CGSCC pass as it creates call edges the
    //        CallGraphUpdater cannot handle yet.
    Value &V = getAssociatedValue();
    if (V.getType()->isPointerTy() &&
        V.getType()->getPointerElementType()->isFunctionTy() &&
        !A.isModulePass())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
                                                       DepClassTy::REQUIRED);
      if (!MemAA.isAssumedReadOnly())
        return indicatePessimisticFixpoint();
    }

    auto Before = SimplifiedAssociatedValue;

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified
      // values in other functions, e.g., we don't want to say an argument in
      // a static function is actually an argument in a different function.
      bool UsedAssumedInformation = false;
      Optional<Constant *> SimpleArgOp =
          A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
      if (!SimpleArgOp.hasValue())
        return true;
      if (!SimpleArgOp.getValue())
        return false;
      if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
        return false;
      return unionAssumed(*SimpleArgOp);
    };

    // Generate an answer specific to a call site context.
    bool Success;
    bool AllCallSitesKnown;
    if (hasCallBaseContext() &&
        getCallBaseContext()->getCalledFunction() == Arg->getParent())
      Success = PredForCallSite(
          AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
    else
      Success = A.checkForAllCallSites(PredForCallSite, *this, true,
                                       AllCallSitesKnown);

    if (!Success)
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
};

struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!isValidState())
      return nullptr;
    return SimplifiedAssociatedValue;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto PredForReturned = [&](Value &V) {
      return checkAndUpdate(A, *this,
                            IRPosition::value(V, getCallBaseContext()));
    };

    if (!A.checkForAllReturnedValues(PredForReturned, *this))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (auto *NewV = getReplacementValue(A)) {
      auto PredForReturned =
          [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
            for (ReturnInst *RI : RetInsts) {
              Value *ReturnedVal = RI->getReturnValue();
              if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
                return true;
              assert(RI->getFunction() == getAnchorScope() &&
                     "ReturnInst in wrong function!");
              LLVM_DEBUG(dbgs()
                         << "[ValueSimplify] " << *ReturnedVal << " -> "
                         << *NewV << " in " << *RI << " :: " << *this << "\n");
              if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
                Changed = ChangeStatus::CHANGED;
            }
            return true;
          };
      A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFloating : AAValueSimplifyImpl {
  AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    Value &V = getAnchorValue();

    // TODO: Add other cases.
    if (isa<Constant>(V))
      indicatePessimisticFixpoint();
  }

  /// Check if \p Cmp is a comparison we can simplify.
  ///
  /// We handle multiple cases, one in which at least one operand is an
  /// (assumed) nullptr. If so, try to simplify it using AANonNull on the
  /// other operand. Return true if successful; in that case
  /// SimplifiedAssociatedValue will be updated.
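  /// For example (illustrative only): given
  ///   %c = icmp eq i8* %p, null
  /// the comparison simplifies to false if AANonNull assumes %p to be
  /// non-null, and the `icmp ne` form would simplify to true.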
  bool handleCmp(Attributor &A, CmpInst &Cmp) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, V.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };

    Value *LHS = Cmp.getOperand(0);
    Value *RHS = Cmp.getOperand(1);

    // Simplify the operands first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS =
        A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedLHS.hasValue())
      return true;
    if (!SimplifiedLHS.getValue())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS =
        A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedRHS.hasValue())
      return true;
    if (!SimplifiedRHS.getValue())
      return false;
    RHS = *SimplifiedRHS;

    LLVMContext &Ctx = Cmp.getContext();
    // Handle the trivial case first in which we don't even need to think
    // about null or non-null.
    if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
      Constant *NewVal =
          ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
      if (!Union(*NewVal))
        return false;
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return true;
    }

    // From now on we only handle equalities (==, !=).
    ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
    if (!ICmp || !ICmp->isEquality())
      return false;

    bool LHSIsNull = isa<ConstantPointerNull>(LHS);
    bool RHSIsNull = isa<ConstantPointerNull>(RHS);
    if (!LHSIsNull && !RHSIsNull)
      return false;

    // We are left with the nullptr ==/!= non-nullptr case. We'll use
    // AANonNull on the non-nullptr operand and if we assume it's non-null we
    // can conclude the result of the comparison.
    assert((LHSIsNull || RHSIsNull) &&
           "Expected nullptr versus non-nullptr comparison at this point");

    // The index is the operand that we assume is not null.
    unsigned PtrIdx = LHSIsNull;
    auto &PtrNonNullAA = A.getAAFor<AANonNull>(
        *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
        DepClassTy::REQUIRED);
    if (!PtrNonNullAA.isAssumedNonNull())
      return false;
    UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();

    // The new value depends on the predicate, true for != and false for ==.
    Constant *NewVal = ConstantInt::get(
        Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
    if (!Union(*NewVal))
      return false;

    if (!UsedAssumedInformation)
      indicateOptimisticFixpoint();

    return true;
  }

  bool updateWithLoad(Attributor &A, LoadInst &L) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, L.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };
    return handleLoad(A, *this, L, Union);
  }

  /// Use the generic, non-optimistic InstSimplify functionality if we managed
  /// to simplify any operand of the instruction \p I. Return true if
  /// successful; in that case SimplifiedAssociatedValue will be updated.
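  /// A hedged sketch of the idea: if the operand %x of
  ///   %add = add i32 %x, 1
  /// is assumed to simplify to the constant 41, InstSimplify is queried with
  /// the substituted operands and can fold %add to 42.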
  bool handleGenericInst(Attributor &A, Instruction &I) {
    bool SomeSimplified = false;
    bool UsedAssumedInformation = false;

    SmallVector<Value *, 8> NewOps(I.getNumOperands());
    int Idx = 0;
    for (Value *Op : I.operands()) {
      const auto &SimplifiedOp =
          A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
                                 *this, UsedAssumedInformation);
      // If we are not sure about any operand we are not sure about the
      // entire instruction, we'll wait.
      if (!SimplifiedOp.hasValue())
        return true;

      if (SimplifiedOp.getValue())
        NewOps[Idx] = SimplifiedOp.getValue();
      else
        NewOps[Idx] = Op;

      SomeSimplified |= (NewOps[Idx] != Op);
      ++Idx;
    }

    // We won't bother with the InstSimplify interface if we didn't simplify
    // any operand ourselves.
    if (!SomeSimplified)
      return false;

    InformationCache &InfoCache = A.getInfoCache();
    Function *F = I.getFunction();
    const auto *DT =
        InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    OptimizationRemarkEmitter *ORE = nullptr;

    const DataLayout &DL = I.getModule()->getDataLayout();
    SimplifyQuery Q(DL, TLI, DT, AC, &I);
    if (Value *SimplifiedI =
            SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, SimplifiedI, I.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    }
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
                            bool Stripped) -> bool {
      auto &AA = A.getAAFor<AAValueSimplify>(
          *this, IRPosition::value(V, getCallBaseContext()),
          DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {

        if (auto *I = dyn_cast<Instruction>(&V)) {
          if (auto *LI = dyn_cast<LoadInst>(&V))
            if (updateWithLoad(A, *LI))
              return true;
          if (auto *Cmp = dyn_cast<CmpInst>(&V))
            if (handleCmp(A, *Cmp))
              return true;
          if (handleGenericInst(A, *I))
            return true;
        }
        // TODO: Look at the instruction and check recursively.

        LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
                          << "\n");
        return false;
      }
      return checkAndUpdate(A, *this,
                            IRPosition::value(V, getCallBaseContext()));
    };

    bool Dummy = false;
    if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
                                     VisitValueCB, getCtxI(),
                                     /* UseValueSimplify */ false))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SimplifiedAssociatedValue = nullptr;
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFunction(IRP, A) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAssociatedFunction())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;
    auto &RetAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);
    auto PredForReturned =
        [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          bool UsedAssumedInformation = false;
          Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
              &RetVal, *cast<CallBase>(getCtxI()), *this,
              UsedAssumedInformation);
          SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
              SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
          return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
        };
    if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
5749 ChangeStatus manifest(Attributor &A) override { 5750 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5751 5752 if (auto *NewV = getReplacementValue(A)) { 5753 Use &U = cast<CallBase>(&getAnchorValue()) 5754 ->getArgOperandUse(getCallSiteArgNo()); 5755 if (A.changeUseAfterManifest(U, *NewV)) 5756 Changed = ChangeStatus::CHANGED; 5757 } 5758 5759 return Changed | AAValueSimplify::manifest(A); 5760 } 5761 5762 void trackStatistics() const override { 5763 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 5764 } 5765 }; 5766 5767 /// ----------------------- Heap-To-Stack Conversion --------------------------- 5768 struct AAHeapToStackFunction final : public AAHeapToStack { 5769 5770 struct AllocationInfo { 5771 /// The call that allocates the memory. 5772 CallBase *const CB; 5773 5774 /// The kind of allocation. 5775 const enum class AllocationKind { 5776 MALLOC, 5777 CALLOC, 5778 ALIGNED_ALLOC, 5779 } Kind; 5780 5781 /// The library function id for the allocation. 5782 LibFunc LibraryFunctionId = NotLibFunc; 5783 5784 /// The status wrt. a rewrite. 5785 enum { 5786 STACK_DUE_TO_USE, 5787 STACK_DUE_TO_FREE, 5788 INVALID, 5789 } Status = STACK_DUE_TO_USE; 5790 5791 /// Flag to indicate if we encountered a use that might free this allocation 5792 /// but which is not in the deallocation infos. 5793 bool HasPotentiallyFreeingUnknownUses = false; 5794 5795 /// The set of free calls that use this allocation. 5796 SmallPtrSet<CallBase *, 1> PotentialFreeCalls{}; 5797 }; 5798 5799 struct DeallocationInfo { 5800 /// The call that deallocates the memory. 5801 CallBase *const CB; 5802 5803 /// Flag to indicate if we don't know all objects this deallocation might 5804 /// free. 5805 bool MightFreeUnknownObjects = false; 5806 5807 /// The set of allocation calls that are potentially freed. 5808 SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{}; 5809 }; 5810 5811 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5812 : AAHeapToStack(IRP, A) {} 5813 5814 ~AAHeapToStackFunction() { 5815 // Ensure we call the destructor so we release any memory allocated in the 5816 // sets. 5817 for (auto &It : AllocationInfos) 5818 It.getSecond()->~AllocationInfo(); 5819 for (auto &It : DeallocationInfos) 5820 It.getSecond()->~DeallocationInfo(); 5821 } 5822 5823 void initialize(Attributor &A) override { 5824 AAHeapToStack::initialize(A); 5825 5826 const Function *F = getAnchorScope(); 5827 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5828 5829 auto AllocationIdentifierCB = [&](Instruction &I) { 5830 CallBase *CB = dyn_cast<CallBase>(&I); 5831 if (!CB) 5832 return true; 5833 if (isFreeCall(CB, TLI)) { 5834 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 5835 return true; 5836 } 5837 bool IsMalloc = isMallocLikeFn(CB, TLI); 5838 bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI); 5839 bool IsCalloc = 5840 !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI); 5841 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) 5842 return true; 5843 auto Kind = 5844 IsMalloc ? AllocationInfo::AllocationKind::MALLOC 5845 : (IsCalloc ? 
AllocationInfo::AllocationKind::CALLOC 5846 : AllocationInfo::AllocationKind::ALIGNED_ALLOC); 5847 5848 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind}; 5849 AllocationInfos[CB] = AI; 5850 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 5851 return true; 5852 }; 5853 5854 bool UsedAssumedInformation = false; 5855 bool Success = A.checkForAllCallLikeInstructions( 5856 AllocationIdentifierCB, *this, UsedAssumedInformation, 5857 /* CheckBBLivenessOnly */ false, 5858 /* CheckPotentiallyDead */ true); 5859 (void)Success; 5860 assert(Success && "Did not expect the call base visit callback to fail!"); 5861 } 5862 5863 const std::string getAsStr() const override { 5864 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 5865 for (const auto &It : AllocationInfos) { 5866 if (It.second->Status == AllocationInfo::INVALID) 5867 ++NumInvalidMallocs; 5868 else 5869 ++NumH2SMallocs; 5870 } 5871 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 5872 std::to_string(NumInvalidMallocs); 5873 } 5874 5875 /// See AbstractAttribute::trackStatistics(). 5876 void trackStatistics() const override { 5877 STATS_DECL( 5878 MallocCalls, Function, 5879 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5880 for (auto &It : AllocationInfos) 5881 if (It.second->Status != AllocationInfo::INVALID) 5882 ++BUILD_STAT_NAME(MallocCalls, Function); 5883 } 5884 5885 bool isAssumedHeapToStack(const CallBase &CB) const override { 5886 if (isValidState()) 5887 if (AllocationInfo *AI = AllocationInfos.lookup(&CB)) 5888 return AI->Status != AllocationInfo::INVALID; 5889 return false; 5890 } 5891 5892 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 5893 if (!isValidState()) 5894 return false; 5895 5896 for (auto &It : AllocationInfos) { 5897 AllocationInfo &AI = *It.second; 5898 if (AI.Status == AllocationInfo::INVALID) 5899 continue; 5900 5901 if (AI.PotentialFreeCalls.count(&CB)) 5902 return true; 5903 } 5904 5905 return false; 5906 } 5907 5908 ChangeStatus manifest(Attributor &A) override { 5909 assert(getState().isValidState() && 5910 "Attempted to manifest an invalid state!"); 5911 5912 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 5913 Function *F = getAnchorScope(); 5914 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5915 5916 for (auto &It : AllocationInfos) { 5917 AllocationInfo &AI = *It.second; 5918 if (AI.Status == AllocationInfo::INVALID) 5919 continue; 5920 5921 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 5922 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 5923 A.deleteAfterManifest(*FreeCall); 5924 HasChanged = ChangeStatus::CHANGED; 5925 } 5926 5927 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 5928 << "\n"); 5929 5930 auto Remark = [&](OptimizationRemark OR) { 5931 LibFunc IsAllocShared; 5932 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 5933 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 5934 return OR << "Moving globalized variable to the stack."; 5935 return OR << "Moving memory allocation from the heap to the stack."; 5936 }; 5937 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 5938 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 5939 else 5940 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 5941 5942 Value *Size; 5943 Optional<APInt> SizeAPI = getSize(A, *this, AI); 5944 if (SizeAPI.hasValue()) { 5945 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 5946 } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) { 
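        // A calloc(Num, SizeT) call allocates Num * SizeT bytes; materialize
        // the product as the size of the replacement alloca. E.g., for the
        // hypothetical call calloc(4, 8) this yields an i8 alloca of 32
        // bytes, zero-initialized by the memset emitted further down.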
5947 auto *Num = AI.CB->getOperand(0); 5948 auto *SizeT = AI.CB->getOperand(1); 5949 IRBuilder<> B(AI.CB); 5950 Size = B.CreateMul(Num, SizeT, "h2s.calloc.size"); 5951 } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) { 5952 Size = AI.CB->getOperand(1); 5953 } else { 5954 Size = AI.CB->getOperand(0); 5955 } 5956 5957 Align Alignment(1); 5958 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 5959 Alignment = max(Alignment, RetAlign); 5960 if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) { 5961 Optional<APInt> AlignmentAPI = 5962 getAPInt(A, *this, *AI.CB->getArgOperand(0)); 5963 assert(AlignmentAPI.hasValue() && 5964 "Expected an alignment during manifest!"); 5965 Alignment = 5966 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue())); 5967 } 5968 5969 unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace(); 5970 Instruction *Alloca = 5971 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 5972 "", AI.CB->getNextNode()); 5973 5974 if (Alloca->getType() != AI.CB->getType()) 5975 Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc", 5976 Alloca->getNextNode()); 5977 5978 A.changeValueAfterManifest(*AI.CB, *Alloca); 5979 5980 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 5981 auto *NBB = II->getNormalDest(); 5982 BranchInst::Create(NBB, AI.CB->getParent()); 5983 A.deleteAfterManifest(*AI.CB); 5984 } else { 5985 A.deleteAfterManifest(*AI.CB); 5986 } 5987 5988 // Zero out the allocated memory if it was a calloc. 5989 if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) { 5990 auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc", 5991 Alloca->getNextNode()); 5992 Value *Ops[] = { 5993 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 5994 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 5995 5996 Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()}; 5997 Module *M = F->getParent(); 5998 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 5999 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 6000 } 6001 HasChanged = ChangeStatus::CHANGED; 6002 } 6003 6004 return HasChanged; 6005 } 6006 6007 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6008 Value &V) { 6009 bool UsedAssumedInformation = false; 6010 Optional<Constant *> SimpleV = 6011 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6012 if (!SimpleV.hasValue()) 6013 return APInt(64, 0); 6014 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) 6015 return CI->getValue(); 6016 return llvm::None; 6017 } 6018 6019 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6020 AllocationInfo &AI) { 6021 6022 if (AI.Kind == AllocationInfo::AllocationKind::MALLOC) 6023 return getAPInt(A, AA, *AI.CB->getArgOperand(0)); 6024 6025 if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) 6026 // Only if the alignment is also constant we return a size. 6027 return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue() 6028 ? getAPInt(A, AA, *AI.CB->getArgOperand(1)) 6029 : llvm::None; 6030 6031 assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC && 6032 "Expected only callocs are left"); 6033 Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0)); 6034 Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1)); 6035 if (!Num.hasValue() || !Size.hasValue()) 6036 return llvm::None; 6037 bool Overflow = false; 6038 Size = Size.getValue().umul_ov(Num.getValue(), Overflow); 6039 return Overflow ? 
                      llvm::None : Size;
  }

  /// Collection of all malloc-like calls in a function with associated
  /// information.
  DenseMap<CallBase *, AllocationInfo *> AllocationInfos;

  /// Collection of all free-like calls in a function with associated
  /// information.
  DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;

  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;
  const Function *F = getAnchorScope();

  const auto &LivenessAA =
      A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  bool StackIsAccessibleByOtherThreads =
      A.getInfoCache().stackIsAccessibleByOtherThreads();

  // Flag to ensure we update our deallocation information at most once per
  // updateImpl call and only if we use the free check reasoning.
  bool HasUpdatedFrees = false;

  auto UpdateFrees = [&]() {
    HasUpdatedFrees = true;

    for (auto &It : DeallocationInfos) {
      DeallocationInfo &DI = *It.second;
      // For now we cannot use deallocations that have unknown inputs, skip
      // them.
      if (DI.MightFreeUnknownObjects)
        continue;

      // No need to analyze dead calls, ignore them instead.
      bool UsedAssumedInformation = false;
      if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
                          /* CheckBBLivenessOnly */ true))
        continue;

      // Use the optimistic version to get the freed objects, ignoring dead
      // branches etc.
      SmallVector<Value *, 8> Objects;
      if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0),
                                           Objects, *this, DI.CB)) {
        LLVM_DEBUG(
            dbgs()
            << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
        DI.MightFreeUnknownObjects = true;
        continue;
      }

      // Check each object explicitly.
      for (auto *Obj : Objects) {
        // Free of null and undef can be ignored as no-ops (or UB in the
        // latter case).
        if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
          continue;

        CallBase *ObjCB = dyn_cast<CallBase>(Obj);
        if (!ObjCB) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] Free of a non-call object: " << *Obj << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
        if (!AI) {
          LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: "
                            << *Obj << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        DI.PotentialAllocationCalls.insert(ObjCB);
      }
    }
  };

  auto FreeCheck = [&](AllocationInfo &AI) {
    // If the stack is not accessible by other threads, the "must-free" logic
    // doesn't apply as the pointer could be shared and would need to be
    // placed in "shareable" memory.
    if (!StackIsAccessibleByOtherThreads) {
      auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
      if (!NoSyncAA.isAssumedNoSync()) {
        LLVM_DEBUG(
            dbgs() << "[H2S] found an escaping use, stack is not accessible by "
                      "other threads and function is not nosync:\n");
        return false;
      }
    }
    if (!HasUpdatedFrees)
      UpdateFrees();

    // TODO: Allow multi-exit functions that have different free calls.
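    // The shape this check accepts looks like (names made up):
    //   %p = call i8* @malloc(i64 %n) ... call void @free(i8* %p)
    // with exactly one free that is always executed together with the
    // allocation; multiple frees on different exit paths are rejected.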
6144 if (AI.PotentialFreeCalls.size() != 1) { 6145 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " 6146 << AI.PotentialFreeCalls.size() << "\n"); 6147 return false; 6148 } 6149 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); 6150 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); 6151 if (!DI) { 6152 LLVM_DEBUG( 6153 dbgs() << "[H2S] unique free call was not known as deallocation call " 6154 << *UniqueFree << "\n"); 6155 return false; 6156 } 6157 if (DI->MightFreeUnknownObjects) { 6158 LLVM_DEBUG( 6159 dbgs() << "[H2S] unique free call might free unknown allocations\n"); 6160 return false; 6161 } 6162 if (DI->PotentialAllocationCalls.size() > 1) { 6163 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " 6164 << DI->PotentialAllocationCalls.size() 6165 << " different allocations\n"); 6166 return false; 6167 } 6168 if (*DI->PotentialAllocationCalls.begin() != AI.CB) { 6169 LLVM_DEBUG( 6170 dbgs() 6171 << "[H2S] unique free call not known to free this allocation but " 6172 << **DI->PotentialAllocationCalls.begin() << "\n"); 6173 return false; 6174 } 6175 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); 6176 if (!Explorer.findInContextOf(UniqueFree, CtxI)) { 6177 LLVM_DEBUG( 6178 dbgs() 6179 << "[H2S] unique free call might not be executed with the allocation " 6180 << *UniqueFree << "\n"); 6181 return false; 6182 } 6183 return true; 6184 }; 6185 6186 auto UsesCheck = [&](AllocationInfo &AI) { 6187 bool ValidUsesOnly = true; 6188 6189 auto Pred = [&](const Use &U, bool &Follow) -> bool { 6190 Instruction *UserI = cast<Instruction>(U.getUser()); 6191 if (isa<LoadInst>(UserI)) 6192 return true; 6193 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 6194 if (SI->getValueOperand() == U.get()) { 6195 LLVM_DEBUG(dbgs() 6196 << "[H2S] escaping store to memory: " << *UserI << "\n"); 6197 ValidUsesOnly = false; 6198 } else { 6199 // A store into the malloc'ed memory is fine. 6200 } 6201 return true; 6202 } 6203 if (auto *CB = dyn_cast<CallBase>(UserI)) { 6204 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 6205 return true; 6206 if (DeallocationInfos.count(CB)) { 6207 AI.PotentialFreeCalls.insert(CB); 6208 return true; 6209 } 6210 6211 unsigned ArgNo = CB->getArgOperandNo(&U); 6212 6213 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 6214 *this, IRPosition::callsite_argument(*CB, ArgNo), 6215 DepClassTy::OPTIONAL); 6216 6217 // If a call site argument use is nofree, we are fine. 6218 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 6219 *this, IRPosition::callsite_argument(*CB, ArgNo), 6220 DepClassTy::OPTIONAL); 6221 6222 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); 6223 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); 6224 if (MaybeCaptured || 6225 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && 6226 MaybeFreed)) { 6227 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; 6228 6229 // Emit a missed remark if this is missed OpenMP globalization. 6230 auto Remark = [&](OptimizationRemarkMissed ORM) { 6231 return ORM 6232 << "Could not move globalized variable to the stack. " 6233 "Variable is potentially captured in call. 
Mark "
                          "parameter as `__attribute__((noescape))` to override.";
          };

          if (ValidUsesOnly &&
              AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
            A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);

          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    if (!A.checkForAllUses(Pred, *this, *AI.CB))
      return false;
    return ValidUsesOnly;
  };

  // The actual update starts here. We look at all allocations and, depending
  // on their status, perform the appropriate check(s).
  for (auto &It : AllocationInfos) {
    AllocationInfo &AI = *It.second;
    if (AI.Status == AllocationInfo::INVALID)
      continue;

    if (MaxHeapToStackSize == -1) {
      if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
        if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
          LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
                            << "\n");
          AI.Status = AllocationInfo::INVALID;
          Changed = ChangeStatus::CHANGED;
          continue;
        }
    } else {
      Optional<APInt> Size = getSize(A, *this, AI);
      if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
        LLVM_DEBUG({
          if (!Size.hasValue())
            dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
                   << "\n";
          else
            dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
                   << MaxHeapToStackSize << "\n";
        });

        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
    }

    switch (AI.Status) {
    case AllocationInfo::STACK_DUE_TO_USE:
      if (UsesCheck(AI))
        continue;
      AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
      LLVM_FALLTHROUGH;
    case AllocationInfo::STACK_DUE_TO_FREE:
      if (FreeCheck(AI))
        continue;
      AI.Status = AllocationInfo::INVALID;
      Changed = ChangeStatus::CHANGED;
      continue;
    case AllocationInfo::INVALID:
      llvm_unreachable("Invalid allocations should never reach this point!");
    };
  }

  return Changed;
}

/// ----------------------- Privatizable Pointers ------------------------------
struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
  AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}

  ChangeStatus indicatePessimisticFixpoint() override {
    AAPrivatizablePtr::indicatePessimisticFixpoint();
    PrivatizableType = nullptr;
    return ChangeStatus::CHANGED;
  }

  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet, nullptr means there is none.
  virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;

  /// Return a privatizable type that encloses both T0 and T1.
  /// TODO: This is merely a stub for now as we should manage a mapping as well.
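  /// For example, combining "not clear yet" (None) with T yields T, combining
  /// T with itself yields T, and combining two distinct types yields nullptr,
  /// i.e., "not privatizable".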
  Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
    if (!T0.hasValue())
      return T1;
    if (!T1.hasValue())
      return T0;
    if (T0 == T1)
      return T0;
    return nullptr;
  }

  Optional<Type *> getPrivatizableType() const override {
    return PrivatizableType;
  }

  const std::string getAsStr() const override {
    return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
  }

protected:
  Optional<Type *> PrivatizableType;
};

// TODO: Do this for call site arguments (probably also other values) as well.

struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    // If this is a byval argument and we know all the call sites (so we can
    // rewrite them), there is no need to check them explicitly.
    bool AllCallSitesKnown;
    if (getIRPosition().hasAttr(Attribute::ByVal) &&
        A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
                               true, AllCallSitesKnown))
      return getAssociatedValue().getType()->getPointerElementType();

    Optional<Type *> Ty;
    unsigned ArgNo = getIRPosition().getCallSiteArgNo();

    // Make sure the associated call site argument has the same type at all
    // call sites and that it is an allocation we know is safe to privatize;
    // for now that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow-up patch.
    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one that is
      // not associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Check that all call sites agree on a type.
      auto &PrivCSArgAA =
          A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
      Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
        if (CSTy.hasValue() && CSTy.getValue())
          CSTy.getValue()->print(dbgs());
        else if (CSTy.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
      });

      Ty = combineTypes(Ty, CSTy);

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
        if (Ty.hasValue() && Ty.getValue())
          Ty.getValue()->print(dbgs());
        else if (Ty.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
        dbgs() << "\n";
      });

      return !Ty.hasValue() || Ty.getValue();
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
      return nullptr;
    return Ty;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // The alignment dependence is optional so that we do not give up on this
    // AA if we (have to) give up on the alignment.
    A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
                        DepClassTy::OPTIONAL);

    // Avoid arguments with padding for now.
    if (!getIRPosition().hasAttr(Attribute::ByVal) &&
        !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
                                                A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the
    // function signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    Function &Fn = *getIRPosition().getAnchorScope();
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    if (!TTI) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
                        << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      return TTI->areTypesABICompatible(
          CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
    };
    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
                                AllCallSitesKnown)) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if, for the given call site, the associated argument is
    // passed to a callback where the privatization would be different.
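    // For example, if the argument is also forwarded to a callback callee
    // through a broker (a pthread_create-like API described via !callback
    // metadata), the callback argument must privatize to the very same type;
    // otherwise the helper below rejects the rewrite.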
    auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
      SmallVector<const Use *, 4> CallbackUses;
      AbstractCallSite::getCallbackUses(CB, CallbackUses);
      for (const Use *U : CallbackUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CB.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
              *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ")\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if, for the given call site, the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
            DepClassTy::REQUIRED);
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getInstruction()->getCalledFunction()->getName()
               << ")\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// Given a type to privatize, \p PrivType, collect its constituent types
  /// (which are used) in \p ReplacementTypes.
  static void
  identifyReplacementTypes(Type *PrivType,
                           SmallVectorImpl<Type *> &ReplacementTypes) {
    // TODO: For now we expand the privatization type to the fullest which can
    //       lead to dead arguments that need to be removed later.
    assert(PrivType && "Expected privatizable type!");

    // Traverse the type, extract constituent types on the outermost level.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
        ReplacementTypes.push_back(PrivStructType->getElementType(u));
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      ReplacementTypes.append(PrivArrayType->getNumElements(),
                              PrivArrayType->getElementType());
    } else {
      ReplacementTypes.push_back(PrivType);
    }
  }

  /// Initialize \p Base according to the type \p PrivType at position \p IP.
  /// The values needed are taken from the arguments of \p F starting at
  /// position \p ArgNo.
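  /// For example (a sketch), for PrivType == { i32, i64 } and \p ArgNo == N
  /// this emits two GEP-derived stores into \p Base:
  ///   store i32 <arg N>,   i32* <Base + element offset 0>
  ///   store i64 <arg N+1>, i64* <Base + element offset 1>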
6625 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6626 unsigned ArgNo, Instruction &IP) { 6627 assert(PrivType && "Expected privatizable type!"); 6628 6629 IRBuilder<NoFolder> IRB(&IP); 6630 const DataLayout &DL = F.getParent()->getDataLayout(); 6631 6632 // Traverse the type, build GEPs and stores. 6633 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6634 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6635 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6636 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6637 Value *Ptr = 6638 constructPointer(PointeeTy, PrivType, &Base, 6639 PrivStructLayout->getElementOffset(u), IRB, DL); 6640 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6641 } 6642 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6643 Type *PointeeTy = PrivArrayType->getElementType(); 6644 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6645 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6646 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6647 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6648 u * PointeeTySize, IRB, DL); 6649 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6650 } 6651 } else { 6652 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6653 } 6654 } 6655 6656 /// Extract values from \p Base according to the type \p PrivType at the 6657 /// call position \p ACS. The values are appended to \p ReplacementValues. 6658 void createReplacementValues(Align Alignment, Type *PrivType, 6659 AbstractCallSite ACS, Value *Base, 6660 SmallVectorImpl<Value *> &ReplacementValues) { 6661 assert(Base && "Expected base value!"); 6662 assert(PrivType && "Expected privatizable type!"); 6663 Instruction *IP = ACS.getInstruction(); 6664 6665 IRBuilder<NoFolder> IRB(IP); 6666 const DataLayout &DL = IP->getModule()->getDataLayout(); 6667 6668 if (Base->getType()->getPointerElementType() != PrivType) 6669 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 6670 "", ACS.getInstruction()); 6671 6672 // Traverse the type, build GEPs and loads. 6673 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6674 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6675 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6676 Type *PointeeTy = PrivStructType->getElementType(u); 6677 Value *Ptr = 6678 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6679 PrivStructLayout->getElementOffset(u), IRB, DL); 6680 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6681 L->setAlignment(Alignment); 6682 ReplacementValues.push_back(L); 6683 } 6684 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6685 Type *PointeeTy = PrivArrayType->getElementType(); 6686 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6687 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6688 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6689 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6690 u * PointeeTySize, IRB, DL); 6691 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6692 L->setAlignment(Alignment); 6693 ReplacementValues.push_back(L); 6694 } 6695 } else { 6696 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6697 L->setAlignment(Alignment); 6698 ReplacementValues.push_back(L); 6699 } 6700 } 6701 6702 /// See AbstractAttribute::manifest(...) 
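  /// A minimal sketch (hypothetical IR) of the overall rewrite performed by
  /// this manifest together with the repair callbacks below, for a struct
  /// argument with two i32 fields:
  ///   before: call void @fn(%pair* %p)
  ///   after:  %f0 = load i32, i32* <gep %p, 0>
  ///           %f1 = load i32, i32* <gep %p, 1>
  ///           call void @fn.new(i32 %f0, i32 %f1)
  /// while inside the new function version a fresh `alloca %pair` is
  /// initialized from the two arguments and replaces all uses of the old
  /// pointer argument.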
6703 ChangeStatus manifest(Attributor &A) override { 6704 if (!PrivatizableType.hasValue()) 6705 return ChangeStatus::UNCHANGED; 6706 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6707 6708 // Collect all tail calls in the function as we cannot allow new allocas to 6709 // escape into tail recursion. 6710 // TODO: Be smarter about new allocas escaping into tail calls. 6711 SmallVector<CallInst *, 16> TailCalls; 6712 bool UsedAssumedInformation = false; 6713 if (!A.checkForAllInstructions( 6714 [&](Instruction &I) { 6715 CallInst &CI = cast<CallInst>(I); 6716 if (CI.isTailCall()) 6717 TailCalls.push_back(&CI); 6718 return true; 6719 }, 6720 *this, {Instruction::Call}, UsedAssumedInformation)) 6721 return ChangeStatus::UNCHANGED; 6722 6723 Argument *Arg = getAssociatedArgument(); 6724 // Query AAAlign attribute for alignment of associated argument to 6725 // determine the best alignment of loads. 6726 const auto &AlignAA = 6727 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6728 6729 // Callback to repair the associated function. A new alloca is placed at the 6730 // beginning and initialized with the values passed through arguments. The 6731 // new alloca replaces the use of the old pointer argument. 6732 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6733 [=](const Attributor::ArgumentReplacementInfo &ARI, 6734 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6735 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6736 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6737 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0, 6738 Arg->getName() + ".priv", IP); 6739 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6740 ArgIt->getArgNo(), *IP); 6741 6742 if (AI->getType() != Arg->getType()) 6743 AI = 6744 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP); 6745 Arg->replaceAllUsesWith(AI); 6746 6747 for (CallInst *CI : TailCalls) 6748 CI->setTailCall(false); 6749 }; 6750 6751 // Callback to repair a call site of the associated function. The elements 6752 // of the privatizable type are loaded prior to the call and passed to the 6753 // new function version. 6754 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6755 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6756 AbstractCallSite ACS, 6757 SmallVectorImpl<Value *> &NewArgOperands) { 6758 // When no alignment is specified for the load instruction, 6759 // natural alignment is assumed. 6760 createReplacementValues( 6761 assumeAligned(AlignAA.getAssumedAlign()), 6762 PrivatizableType.getValue(), ACS, 6763 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6764 NewArgOperands); 6765 }; 6766 6767 // Collect the types that will replace the privatizable type in the function 6768 // signature. 6769 SmallVector<Type *, 16> ReplacementTypes; 6770 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6771 6772 // Register a rewrite of the argument. 
6773 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6774 std::move(FnRepairCB), 6775 std::move(ACSRepairCB))) 6776 return ChangeStatus::CHANGED; 6777 return ChangeStatus::UNCHANGED; 6778 } 6779 6780 /// See AbstractAttribute::trackStatistics() 6781 void trackStatistics() const override { 6782 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6783 } 6784 }; 6785 6786 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6787 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6788 : AAPrivatizablePtrImpl(IRP, A) {} 6789 6790 /// See AbstractAttribute::initialize(...). 6791 virtual void initialize(Attributor &A) override { 6792 // TODO: We can privatize more than arguments. 6793 indicatePessimisticFixpoint(); 6794 } 6795 6796 ChangeStatus updateImpl(Attributor &A) override { 6797 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6798 "updateImpl will not be called"); 6799 } 6800 6801 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6802 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6803 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6804 if (!Obj) { 6805 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6806 return nullptr; 6807 } 6808 6809 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6810 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6811 if (CI->isOne()) 6812 return Obj->getType()->getPointerElementType(); 6813 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6814 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6815 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6816 if (PrivArgAA.isAssumedPrivatizablePtr()) 6817 return Obj->getType()->getPointerElementType(); 6818 } 6819 6820 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6821 "alloca nor privatizable argument: " 6822 << *Obj << "!\n"); 6823 return nullptr; 6824 } 6825 6826 /// See AbstractAttribute::trackStatistics() 6827 void trackStatistics() const override { 6828 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6829 } 6830 }; 6831 6832 struct AAPrivatizablePtrCallSiteArgument final 6833 : public AAPrivatizablePtrFloating { 6834 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6835 : AAPrivatizablePtrFloating(IRP, A) {} 6836 6837 /// See AbstractAttribute::initialize(...). 6838 void initialize(Attributor &A) override { 6839 if (getIRPosition().hasAttr(Attribute::ByVal)) 6840 indicateOptimisticFixpoint(); 6841 } 6842 6843 /// See AbstractAttribute::updateImpl(...). 
6844 ChangeStatus updateImpl(Attributor &A) override { 6845 PrivatizableType = identifyPrivatizableType(A); 6846 if (!PrivatizableType.hasValue()) 6847 return ChangeStatus::UNCHANGED; 6848 if (!PrivatizableType.getValue()) 6849 return indicatePessimisticFixpoint(); 6850 6851 const IRPosition &IRP = getIRPosition(); 6852 auto &NoCaptureAA = 6853 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 6854 if (!NoCaptureAA.isAssumedNoCapture()) { 6855 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 6856 return indicatePessimisticFixpoint(); 6857 } 6858 6859 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 6860 if (!NoAliasAA.isAssumedNoAlias()) { 6861 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 6862 return indicatePessimisticFixpoint(); 6863 } 6864 6865 const auto &MemBehaviorAA = 6866 A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED); 6867 if (!MemBehaviorAA.isAssumedReadOnly()) { 6868 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 6869 return indicatePessimisticFixpoint(); 6870 } 6871 6872 return ChangeStatus::UNCHANGED; 6873 } 6874 6875 /// See AbstractAttribute::trackStatistics() 6876 void trackStatistics() const override { 6877 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 6878 } 6879 }; 6880 6881 struct AAPrivatizablePtrCallSiteReturned final 6882 : public AAPrivatizablePtrFloating { 6883 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 6884 : AAPrivatizablePtrFloating(IRP, A) {} 6885 6886 /// See AbstractAttribute::initialize(...). 6887 void initialize(Attributor &A) override { 6888 // TODO: We can privatize more than arguments. 6889 indicatePessimisticFixpoint(); 6890 } 6891 6892 /// See AbstractAttribute::trackStatistics() 6893 void trackStatistics() const override { 6894 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 6895 } 6896 }; 6897 6898 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 6899 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 6900 : AAPrivatizablePtrFloating(IRP, A) {} 6901 6902 /// See AbstractAttribute::initialize(...). 6903 void initialize(Attributor &A) override { 6904 // TODO: We can privatize more than arguments. 6905 indicatePessimisticFixpoint(); 6906 } 6907 6908 /// See AbstractAttribute::trackStatistics() 6909 void trackStatistics() const override { 6910 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 6911 } 6912 }; 6913 6914 /// -------------------- Memory Behavior Attributes ---------------------------- 6915 /// Includes read-none, read-only, and write-only. 6916 /// ---------------------------------------------------------------------------- 6917 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 6918 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 6919 : AAMemoryBehavior(IRP, A) {} 6920 6921 /// See AbstractAttribute::initialize(...). 6922 void initialize(Attributor &A) override { 6923 intersectAssumedBits(BEST_STATE); 6924 getKnownStateFromValue(getIRPosition(), getState()); 6925 AAMemoryBehavior::initialize(A); 6926 } 6927 6928 /// Return the memory behavior information encoded in the IR for \p IRP. 
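  /// E.g., an IR-level `readonly` attribute at \p IRP becomes known NO_WRITES
  /// state below, and an instruction that cannot write memory contributes the
  /// same.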
6929 static void getKnownStateFromValue(const IRPosition &IRP, 6930 BitIntegerState &State, 6931 bool IgnoreSubsumingPositions = false) { 6932 SmallVector<Attribute, 2> Attrs; 6933 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6934 for (const Attribute &Attr : Attrs) { 6935 switch (Attr.getKindAsEnum()) { 6936 case Attribute::ReadNone: 6937 State.addKnownBits(NO_ACCESSES); 6938 break; 6939 case Attribute::ReadOnly: 6940 State.addKnownBits(NO_WRITES); 6941 break; 6942 case Attribute::WriteOnly: 6943 State.addKnownBits(NO_READS); 6944 break; 6945 default: 6946 llvm_unreachable("Unexpected attribute!"); 6947 } 6948 } 6949 6950 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 6951 if (!I->mayReadFromMemory()) 6952 State.addKnownBits(NO_READS); 6953 if (!I->mayWriteToMemory()) 6954 State.addKnownBits(NO_WRITES); 6955 } 6956 } 6957 6958 /// See AbstractAttribute::getDeducedAttributes(...). 6959 void getDeducedAttributes(LLVMContext &Ctx, 6960 SmallVectorImpl<Attribute> &Attrs) const override { 6961 assert(Attrs.size() == 0); 6962 if (isAssumedReadNone()) 6963 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6964 else if (isAssumedReadOnly()) 6965 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 6966 else if (isAssumedWriteOnly()) 6967 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 6968 assert(Attrs.size() <= 1); 6969 } 6970 6971 /// See AbstractAttribute::manifest(...). 6972 ChangeStatus manifest(Attributor &A) override { 6973 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 6974 return ChangeStatus::UNCHANGED; 6975 6976 const IRPosition &IRP = getIRPosition(); 6977 6978 // Check if we would improve the existing attributes first. 6979 SmallVector<Attribute, 4> DeducedAttrs; 6980 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6981 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6982 return IRP.hasAttr(Attr.getKindAsEnum(), 6983 /* IgnoreSubsumingPositions */ true); 6984 })) 6985 return ChangeStatus::UNCHANGED; 6986 6987 // Clear existing attributes. 6988 IRP.removeAttrs(AttrKinds); 6989 6990 // Use the generic manifest method. 6991 return IRAttribute::manifest(A); 6992 } 6993 6994 /// See AbstractState::getAsStr(). 6995 const std::string getAsStr() const override { 6996 if (isAssumedReadNone()) 6997 return "readnone"; 6998 if (isAssumedReadOnly()) 6999 return "readonly"; 7000 if (isAssumedWriteOnly()) 7001 return "writeonly"; 7002 return "may-read/write"; 7003 } 7004 7005 /// The set of IR attributes AAMemoryBehavior deals with. 7006 static const Attribute::AttrKind AttrKinds[3]; 7007 }; 7008 7009 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7010 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7011 7012 /// Memory behavior attribute for a floating value. 7013 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7014 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7015 : AAMemoryBehaviorImpl(IRP, A) {} 7016 7017 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use &U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
};

/// Memory behavior attribute for a function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    // can query it when we use has/getAttr. That would allow us to reuse the
    // initialize of the base class here.
    bool HasByVal =
        IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);

    // Give up immediately if the associated argument is missing or its
    // function is not eligible for IPO amendments.
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
      indicatePessimisticFixpoint();
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: Pointer arguments are not supported on vectors of pointers yet.
    if (!getAssociatedValue().getType()->isPointerTy())
      return ChangeStatus::UNCHANGED;

    // TODO: From readattrs.ll: "inalloca parameters are always
    //       considered written"
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};

struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorArgument(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, nothing to do here.
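    // E.g., in `call void (i32, ...) @vararg(i32 0, i8* %p)` (illustrative)
    // the operand %p has no corresponding formal Argument, and an indirect
    // call has no known callee, so getAssociatedArgument() is null either
    // way.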
    Argument *Arg = getAssociatedArgument();
    if (!Arg) {
      indicatePessimisticFixpoint();
      return;
    }
    if (Arg->hasByValAttr()) {
      addKnownBits(NO_WRITES);
      removeKnownBits(NO_READS);
      removeAssumedBits(NO_READS);
    }
    AAMemoryBehaviorArgument::initialize(A);
    if (getAssociatedFunction()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CSARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CSARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CSARG_ATTR(writeonly)
  }
};

/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Function &F = cast<Function>(getAnchorValue());
    if (isAssumedReadNone()) {
      F.removeFnAttr(Attribute::ArgMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }
    return AAMemoryBehaviorImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};

/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};

ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has an own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      intersectAssumedBits(MemBehaviorAA.getAssumed());
      return !isAtFixpoint();
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    return !isAtFixpoint();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                          UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
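  // (A byval argument is a callee-local copy, so the function-scope memory
  // state does not necessarily constrain accesses through it.)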
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"); if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA =
      A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << " \n");

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(&U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might return the argument "through return", which we allow and for
  // which we need to check call users.
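  // E.g., for `%q = call i8* @passthrough(i8* %p)` (illustrative) where the
  // callee may return its argument, %p escapes only "through return", so the
  // users of %q must still be visited.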
  if (U.get()->getType()->isPointerTy()) {
    unsigned ArgNo = CB->getArgOperandNo(&U);
    const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
        *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
    return !ArgNoCaptureAA.isAssumedNoCapture();
  }

  return true;
}

void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
                                            const Instruction *UserI) {
  assert(UserI->mayReadOrWriteMemory());

  switch (UserI->getOpcode()) {
  default:
    // TODO: Handle all atomics and other side-effect operations we know of.
    break;
  case Instruction::Load:
    // Loads cause the NO_READS property to disappear.
    removeAssumedBits(NO_READS);
    return;

  case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. While capturing was taken care of elsewhere, we still
    // have to give up on stores of the value itself as they are not looked
    // through.
    if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
      removeAssumedBits(NO_WRITES);
    else
      indicatePessimisticFixpoint();
    return;

  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke: {
    // For call sites we look at the argument memory behavior attribute (this
    // could be recursive!) in order to restrict our own state.
    const auto *CB = cast<CallBase>(UserI);

    // Give up on operand bundles.
    if (CB->isBundleOperand(&U)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Calling a function does read the function pointer, and may even write
    // it if the function is self-modifying.
    if (CB->isCallee(&U)) {
      removeAssumedBits(NO_READS);
      break;
    }

    // Adjust the possible access behavior based on the information on the
    // argument.
    IRPosition Pos;
    if (U.get()->getType()->isPointerTy())
      Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
    else
      Pos = IRPosition::callsite_function(*CB);
    const auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
    // "assumed" has at most the same bits as the MemBehaviorAA assumed
    // and at least "known".
    intersectAssumedBits(MemBehaviorAA.getAssumed());
    return;
  }
  };

  // Generally, look at the "may-properties" and adjust the assumed state if we
  // did not trigger special handling before.
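  // E.g., an atomicrmw is not special-cased above and, as it both reads and
  // writes memory, clears NO_READS and NO_WRITES here.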
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblemem_or_argmemonly
/// ----------------------------------------------------------------------------

std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}

namespace {
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      AccessKind2Accesses[u] = nullptr;
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      if (AccessKind2Accesses[u])
        AccessKind2Accesses[u]->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via
    // interprocedural constant propagation. It is unclear if this is the best
    // way but it is unlikely this will cause real performance problems. If we
    // are deriving attributes for the anchor function we even remove the
    // attribute in addition to ignoring it.
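    // E.g., if interprocedural constant propagation replaces a pointer
    // argument of an internal function with a global, a former argument
    // access becomes a global access and a kept `argmemonly` would be wrong.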
7484 bool UseArgMemOnly = true; 7485 Function *AnchorFn = IRP.getAnchorScope(); 7486 if (AnchorFn && A.isRunOn(*AnchorFn)) 7487 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7488 7489 SmallVector<Attribute, 2> Attrs; 7490 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7491 for (const Attribute &Attr : Attrs) { 7492 switch (Attr.getKindAsEnum()) { 7493 case Attribute::ReadNone: 7494 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7495 break; 7496 case Attribute::InaccessibleMemOnly: 7497 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7498 break; 7499 case Attribute::ArgMemOnly: 7500 if (UseArgMemOnly) 7501 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7502 else 7503 IRP.removeAttrs({Attribute::ArgMemOnly}); 7504 break; 7505 case Attribute::InaccessibleMemOrArgMemOnly: 7506 if (UseArgMemOnly) 7507 State.addKnownBits(inverseLocation( 7508 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7509 else 7510 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7511 break; 7512 default: 7513 llvm_unreachable("Unexpected attribute!"); 7514 } 7515 } 7516 } 7517 7518 /// See AbstractAttribute::getDeducedAttributes(...). 7519 void getDeducedAttributes(LLVMContext &Ctx, 7520 SmallVectorImpl<Attribute> &Attrs) const override { 7521 assert(Attrs.size() == 0); 7522 if (isAssumedReadNone()) { 7523 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7524 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7525 if (isAssumedInaccessibleMemOnly()) 7526 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7527 else if (isAssumedArgMemOnly()) 7528 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7529 else if (isAssumedInaccessibleOrArgMemOnly()) 7530 Attrs.push_back( 7531 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7532 } 7533 assert(Attrs.size() <= 1); 7534 } 7535 7536 /// See AbstractAttribute::manifest(...). 7537 ChangeStatus manifest(Attributor &A) override { 7538 const IRPosition &IRP = getIRPosition(); 7539 7540 // Check if we would improve the existing attributes first. 7541 SmallVector<Attribute, 4> DeducedAttrs; 7542 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7543 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7544 return IRP.hasAttr(Attr.getKindAsEnum(), 7545 /* IgnoreSubsumingPositions */ true); 7546 })) 7547 return ChangeStatus::UNCHANGED; 7548 7549 // Clear existing attributes. 7550 IRP.removeAttrs(AttrKinds); 7551 if (isAssumedReadNone()) 7552 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7553 7554 // Use the generic manifest method. 7555 return IRAttribute::manifest(A); 7556 } 7557 7558 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction will
    // become an access for all potential access kinds:
    // TODO: Add pointers for argmemonly and globals to improve the results of
    //       checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];

  /// Categorize the pointer arguments of \p CB that might access memory in
  /// \p AccessedLocs and update the state and access map accordingly.
  void
  categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
                                     AAMemoryLocation::StateType &AccessedLocs,
                                     bool &Changed);

  /// Return the kind(s) of location that may be accessed by \p I.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  SmallVector<Value *, 8> Objects;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  for (Value *Obj : Objects) {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies
      //       performed on the call edge, though we should. To make that
      //       happen we need to teach various passes, e.g., DSE, about the
      //       copy effect of a byval. That would also allow us to mark
      //       functions only accessing byval arguments as readnone again;
      //       arguably their accesses have no effect outside of the function,
      //       like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we don't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
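      // E.g., a load based on `@g = internal constant i32 7` (illustrative)
      // is skipped entirely and never registered as a location access.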
7720 if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7721 if (GVar->isConstant())
7722 continue;
7723
7724 if (GV->hasLocalLinkage())
7725 MLK = NO_GLOBAL_INTERNAL_MEM;
7726 else
7727 MLK = NO_GLOBAL_EXTERNAL_MEM;
7728 } else if (isa<ConstantPointerNull>(Obj) &&
7729 !NullPointerIsDefined(getAssociatedFunction(),
7730 Ptr.getType()->getPointerAddressSpace())) {
7731 continue;
7732 } else if (isa<AllocaInst>(Obj)) {
7733 MLK = NO_LOCAL_MEM;
7734 } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7735 const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7736 *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7737 if (NoAliasAA.isAssumedNoAlias())
7738 MLK = NO_MALLOCED_MEM;
7739 else
7740 MLK = NO_UNKOWN_MEM;
7741 } else {
7742 MLK = NO_UNKOWN_MEM;
7743 }
7744
7745 assert(MLK != NO_LOCATIONS && "No location specified!");
7746 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7747 << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7748 << "\n");
7749 updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7750 getAccessKindFromInst(&I));
7751 }
7752
7753 LLVM_DEBUG(
7754 dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7755 << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7756 }
7757
7758 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7759 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7760 bool &Changed) {
7761 for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7762
7763 // Skip non-pointer arguments.
7764 const Value *ArgOp = CB.getArgOperand(ArgNo);
7765 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7766 continue;
7767
7768 // Skip readnone arguments.
7769 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7770 const auto &ArgOpMemLocationAA =
7771 A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7772
7773 if (ArgOpMemLocationAA.isAssumedReadNone())
7774 continue;
7775
7776 // Categorize potentially accessed pointer arguments as if there was an
7777 // access instruction with them as the pointer operand.
7778 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7779 }
7780 }
7781
7782 AAMemoryLocation::MemoryLocationsKind
7783 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7784 bool &Changed) {
7785 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7786 << I << "\n");
7787
7788 AAMemoryLocation::StateType AccessedLocs;
7789 AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7790
7791 if (auto *CB = dyn_cast<CallBase>(&I)) {
7792
7793 // First check if we assume any accessed memory is visible.
7794 const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7795 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7796 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7797 << " [" << CBMemLocationAA << "]\n");
7798
7799 if (CBMemLocationAA.isAssumedReadNone())
7800 return NO_LOCATIONS;
7801
7802 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7803 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7804 Changed, getAccessKindFromInst(&I));
7805 return AccessedLocs.getAssumed();
7806 }
7807
7808 uint32_t CBAssumedNotAccessedLocs =
7809 CBMemLocationAA.getAssumedNotAccessedLocation();
7810
7811 // Set the argmemonly and global bits as we handle them separately below.
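// A sketch of the bit manipulation (values assumed for illustration): if
// the callee is known to only access argument memory, then
// CBAssumedNotAccessedLocs has every NO_* bit set except NO_ARGUMENT_MEM.
// Or-ing in NO_ARGUMENT_MEM and NO_GLOBAL_MEM below marks those kinds as
// handled so the generic loop only copies the remaining single-bit kinds.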
7812 uint32_t CBAssumedNotAccessedLocsNoArgMem =
7813 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7814
7815 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7816 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7817 continue;
7818 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7819 getAccessKindFromInst(&I));
7820 }
7821
7822 // Now handle global memory if it might be accessed. This is slightly tricky
7823 // as NO_GLOBAL_MEM has multiple bits set.
7824 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7825 if (HasGlobalAccesses) {
7826 auto AccessPred = [&](const Instruction *, const Value *Ptr,
7827 AccessKind Kind, MemoryLocationsKind MLK) {
7828 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7829 getAccessKindFromInst(&I));
7830 return true;
7831 };
7832 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7833 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7834 return AccessedLocs.getWorstState();
7835 }
7836
7837 LLVM_DEBUG(
7838 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7839 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7840
7841 // Now handle argument memory if it might be accessed.
7842 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7843 if (HasArgAccesses)
7844 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7845
7846 LLVM_DEBUG(
7847 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7848 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7849
7850 return AccessedLocs.getAssumed();
7851 }
7852
7853 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7854 LLVM_DEBUG(
7855 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7856 << I << " [" << *Ptr << "]\n");
7857 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7858 return AccessedLocs.getAssumed();
7859 }
7860
7861 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7862 << I << "\n");
7863 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7864 getAccessKindFromInst(&I));
7865 return AccessedLocs.getAssumed();
7866 }
7867
7868 /// An AA to represent the memory location function attributes.
7869 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7870 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7871 : AAMemoryLocationImpl(IRP, A) {}
7872
7873 /// See AbstractAttribute::updateImpl(Attributor &A).
7874 virtual ChangeStatus updateImpl(Attributor &A) override {
7875
7876 const auto &MemBehaviorAA =
7877 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7878 if (MemBehaviorAA.isAssumedReadNone()) {
7879 if (MemBehaviorAA.isKnownReadNone())
7880 return indicateOptimisticFixpoint();
7881 assert(isAssumedReadNone() &&
7882 "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7883 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7884 return ChangeStatus::UNCHANGED;
7885 }
7886
7887 // The current assumed state used to determine a change.
7888 auto AssumedState = getAssumed();
7889 bool Changed = false;
7890
7891 auto CheckRWInst = [&](Instruction &I) {
7892 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7893 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7894 << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7895 removeAssumedBits(inverseLocation(MLK, false, false));
7896 // Stop once only the valid bit is set in the *not assumed location*, thus
7897 // once we don't actually exclude any memory locations in the state.
7898 return getAssumedNotAccessedLocation() != VALID_STATE;
7899 };
7900
7901 bool UsedAssumedInformation = false;
7902 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7903 UsedAssumedInformation))
7904 return indicatePessimisticFixpoint();
7905
7906 Changed |= AssumedState != getAssumed();
7907 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7908 }
7909
7910 /// See AbstractAttribute::trackStatistics()
7911 void trackStatistics() const override {
7912 if (isAssumedReadNone())
7913 STATS_DECLTRACK_FN_ATTR(readnone)
7914 else if (isAssumedArgMemOnly())
7915 STATS_DECLTRACK_FN_ATTR(argmemonly)
7916 else if (isAssumedInaccessibleMemOnly())
7917 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7918 else if (isAssumedInaccessibleOrArgMemOnly())
7919 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7920 }
7921 };
7922
7923 /// AAMemoryLocation attribute for call sites.
7924 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7925 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7926 : AAMemoryLocationImpl(IRP, A) {}
7927
7928 /// See AbstractAttribute::initialize(...).
7929 void initialize(Attributor &A) override {
7930 AAMemoryLocationImpl::initialize(A);
7931 Function *F = getAssociatedFunction();
7932 if (!F || F->isDeclaration())
7933 indicatePessimisticFixpoint();
7934 }
7935
7936 /// See AbstractAttribute::updateImpl(...).
7937 ChangeStatus updateImpl(Attributor &A) override {
7938 // TODO: Once we have call site specific value information we can provide
7939 // call site specific liveness information and then it makes sense to
7940 // specialize attributes for call site arguments instead of redirecting
7941 // requests to the callee argument.
7942 Function *F = getAssociatedFunction();
7943 const IRPosition &FnPos = IRPosition::function(*F);
7944 auto &FnAA =
7945 A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7946 bool Changed = false;
7947 auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7948 AccessKind Kind, MemoryLocationsKind MLK) {
7949 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7950 getAccessKindFromInst(I));
7951 return true;
7952 };
7953 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7954 return indicatePessimisticFixpoint();
7955 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7956 }
7957
7958 /// See AbstractAttribute::trackStatistics()
7959 void trackStatistics() const override {
7960 if (isAssumedReadNone())
7961 STATS_DECLTRACK_CS_ATTR(readnone)
7962 }
7963 };
7964
7965 /// ------------------ Value Constant Range Attribute -------------------------
7966
7967 struct AAValueConstantRangeImpl : AAValueConstantRange {
7968 using StateType = IntegerRangeState;
7969 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7970 : AAValueConstantRange(IRP, A) {}
7971
7972 /// See AbstractAttribute::initialize(..).
7973 void initialize(Attributor &A) override {
7974 if (A.hasSimplificationCallback(getIRPosition())) {
7975 indicatePessimisticFixpoint();
7976 return;
7977 }
7978
7979 // Intersect a range given by SCEV.
7980 intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7981
7982 // Intersect a range given by LVI.
7983 intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7984 }
7985
7986 /// See AbstractAttribute::getAsStr().
7987 const std::string getAsStr() const override {
7988 std::string Str;
7989 llvm::raw_string_ostream OS(Str);
7990 OS << "range(" << getBitWidth() << ")<";
7991 getKnown().print(OS);
7992 OS << " / ";
7993 getAssumed().print(OS);
7994 OS << ">";
7995 return OS.str();
7996 }
7997
7998 /// Helper function to get a SCEV expr for the associated value at program
7999 /// point \p I.
8000 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8001 if (!getAnchorScope())
8002 return nullptr;
8003
8004 ScalarEvolution *SE =
8005 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8006 *getAnchorScope());
8007
8008 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8009 *getAnchorScope());
8010
8011 if (!SE || !LI)
8012 return nullptr;
8013
8014 const SCEV *S = SE->getSCEV(&getAssociatedValue());
8015 if (!I)
8016 return S;
8017
8018 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8019 }
8020
8021 /// Helper function to get a range from SCEV for the associated value at
8022 /// program point \p I.
8023 ConstantRange getConstantRangeFromSCEV(Attributor &A,
8024 const Instruction *I = nullptr) const {
8025 if (!getAnchorScope())
8026 return getWorstState(getBitWidth());
8027
8028 ScalarEvolution *SE =
8029 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8030 *getAnchorScope());
8031
8032 const SCEV *S = getSCEV(A, I);
8033 if (!SE || !S)
8034 return getWorstState(getBitWidth());
8035
8036 return SE->getUnsignedRange(S);
8037 }
8038
8039 /// Helper function to get a range from LVI for the associated value at
8040 /// program point \p I.
8041 ConstantRange
8042 getConstantRangeFromLVI(Attributor &A,
8043 const Instruction *CtxI = nullptr) const {
8044 if (!getAnchorScope())
8045 return getWorstState(getBitWidth());
8046
8047 LazyValueInfo *LVI =
8048 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8049 *getAnchorScope());
8050
8051 if (!LVI || !CtxI)
8052 return getWorstState(getBitWidth());
8053 return LVI->getConstantRange(&getAssociatedValue(),
8054 const_cast<Instruction *>(CtxI));
8055 }
8056
8057 /// Return true if \p CtxI is valid for querying outside analyses.
8058 /// This basically makes sure we do not ask intra-procedural analyses
8059 /// about a context in the wrong function or a context that violates
8060 /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8061 /// if the original context of this AA is OK or should be considered invalid.
8062 bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8063 const Instruction *CtxI,
8064 bool AllowAACtxI) const {
8065 if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8066 return false;
8067
8068 // Our context might be in a different function; no intra-procedural
8069 // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
8070 if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8071 return false;
8072
8073 // If the context is not dominated by the value there are paths to the
8074 // context that do not define the value. This cannot be handled by
8075 // LazyValueInfo so we need to bail.
8076 if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8077 InformationCache &InfoCache = A.getInfoCache();
8078 const DominatorTree *DT =
8079 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8080 *I->getFunction());
8081 return DT && DT->dominates(I, CtxI);
8082 }
8083
8084 return true;
8085 }
8086
8087 /// See AAValueConstantRange::getKnownConstantRange(..).
8088 ConstantRange
8089 getKnownConstantRange(Attributor &A,
8090 const Instruction *CtxI = nullptr) const override {
8091 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8092 /* AllowAACtxI */ false))
8093 return getKnown();
8094
8095 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8096 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8097 return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8098 }
8099
8100 /// See AAValueConstantRange::getAssumedConstantRange(..).
8101 ConstantRange
8102 getAssumedConstantRange(Attributor &A,
8103 const Instruction *CtxI = nullptr) const override {
8104 // TODO: Make SCEV use Attributor assumption.
8105 // We may be able to bound a variable range via assumptions in
8106 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8107 // evolve to x^2 + x, then we can say that y is in [2, 12].
8108 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8109 /* AllowAACtxI */ false))
8110 return getAssumed();
8111
8112 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8113 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8114 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8115 }
8116
8117 /// Helper function to create MDNode for range metadata.
8118 static MDNode *
8119 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8120 const ConstantRange &AssumedConstantRange) {
8121 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8122 Ty, AssumedConstantRange.getLower())),
8123 ConstantAsMetadata::get(ConstantInt::get(
8124 Ty, AssumedConstantRange.getUpper()))};
8125 return MDNode::get(Ctx, LowAndHigh);
8126 }
8127
8128 /// Return true if \p Assumed is a strictly better range than \p KnownRanges.
8129 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8130
8131 if (Assumed.isFullSet())
8132 return false;
8133
8134 if (!KnownRanges)
8135 return true;
8136
8137 // If multiple ranges are annotated in IR, we give up annotating the
8138 // assumed range for now.
8139
8140 // TODO: If there exists a known range which contains the assumed range,
8141 // we can say the assumed range is better.
8142 if (KnownRanges->getNumOperands() > 2)
8143 return false;
8144
8145 ConstantInt *Lower =
8146 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8147 ConstantInt *Upper =
8148 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8149
8150 ConstantRange Known(Lower->getValue(), Upper->getValue());
8151 return Known.contains(Assumed) && Known != Assumed;
8152 }
8153
8154 /// Helper function to set range metadata.
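/// For illustration, with an assumed range [0, 10) on an i32 load, the
/// manifested metadata would look like (hypothetical IR):
///   %x = load i32, i32* %p, !range !0
///   !0 = !{i32 0, i32 10}
/// where a range metadata pair denotes the half-open interval [Lower, Upper).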
8155 static bool
8156 setRangeMetadataIfisBetterRange(Instruction *I,
8157 const ConstantRange &AssumedConstantRange) {
8158 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8159 if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8160 if (!AssumedConstantRange.isEmptySet()) {
8161 I->setMetadata(LLVMContext::MD_range,
8162 getMDNodeForConstantRange(I->getType(), I->getContext(),
8163 AssumedConstantRange));
8164 return true;
8165 }
8166 }
8167 return false;
8168 }
8169
8170 /// See AbstractAttribute::manifest()
8171 ChangeStatus manifest(Attributor &A) override {
8172 ChangeStatus Changed = ChangeStatus::UNCHANGED;
8173 ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8174 assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8175
8176 auto &V = getAssociatedValue();
8177 if (!AssumedConstantRange.isEmptySet() &&
8178 !AssumedConstantRange.isSingleElement()) {
8179 if (Instruction *I = dyn_cast<Instruction>(&V)) {
8180 assert(I == getCtxI() && "Should not annotate an instruction which is "
8181 "not the context instruction");
8182 if (isa<CallInst>(I) || isa<LoadInst>(I))
8183 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8184 Changed = ChangeStatus::CHANGED;
8185 }
8186 }
8187
8188 return Changed;
8189 }
8190 };
8191
8192 struct AAValueConstantRangeArgument final
8193 : AAArgumentFromCallSiteArguments<
8194 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8195 true /* BridgeCallBaseContext */> {
8196 using Base = AAArgumentFromCallSiteArguments<
8197 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8198 true /* BridgeCallBaseContext */>;
8199 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8200 : Base(IRP, A) {}
8201
8202 /// See AbstractAttribute::initialize(..).
8203 void initialize(Attributor &A) override {
8204 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8205 indicatePessimisticFixpoint();
8206 } else {
8207 Base::initialize(A);
8208 }
8209 }
8210
8211 /// See AbstractAttribute::trackStatistics()
8212 void trackStatistics() const override {
8213 STATS_DECLTRACK_ARG_ATTR(value_range)
8214 }
8215 };
8216
8217 struct AAValueConstantRangeReturned
8218 : AAReturnedFromReturnedValues<AAValueConstantRange,
8219 AAValueConstantRangeImpl,
8220 AAValueConstantRangeImpl::StateType,
8221 /* PropagateCallBaseContext */ true> {
8222 using Base =
8223 AAReturnedFromReturnedValues<AAValueConstantRange,
8224 AAValueConstantRangeImpl,
8225 AAValueConstantRangeImpl::StateType,
8226 /* PropagateCallBaseContext */ true>;
8227 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8228 : Base(IRP, A) {}
8229
8230 /// See AbstractAttribute::initialize(...).
8231 void initialize(Attributor &A) override {}
8232
8233 /// See AbstractAttribute::trackStatistics()
8234 void trackStatistics() const override {
8235 STATS_DECLTRACK_FNRET_ATTR(value_range)
8236 }
8237 };
8238
8239 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8240 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8241 : AAValueConstantRangeImpl(IRP, A) {}
8242
8243 /// See AbstractAttribute::initialize(...).
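/// The code below seeds the state from the easy cases: a ConstantInt
/// collapses the range to a singleton (e.g., i32 7 becomes [7, 8)), undef
/// is collapsed to the singleton [0, 1) of the right bit width, and loads
/// pick up existing !range metadata; the remaining cases are either handled
/// in updateImpl or give up immediately.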
8244 void initialize(Attributor &A) override {
8245 AAValueConstantRangeImpl::initialize(A);
8246 if (isAtFixpoint())
8247 return;
8248
8249 Value &V = getAssociatedValue();
8250
8251 if (auto *C = dyn_cast<ConstantInt>(&V)) {
8252 unionAssumed(ConstantRange(C->getValue()));
8253 indicateOptimisticFixpoint();
8254 return;
8255 }
8256
8257 if (isa<UndefValue>(&V)) {
8258 // Collapse the undef state to 0.
8259 unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8260 indicateOptimisticFixpoint();
8261 return;
8262 }
8263
8264 if (isa<CallBase>(&V))
8265 return;
8266
8267 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8268 return;
8269
8270 // If it is a load instruction with range metadata, use it.
8271 if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8272 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8273 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8274 return;
8275 }
8276
8277 // We can work with PHI and select instructions as we traverse their
8278 // operands during the update.
8279 if (isa<SelectInst>(V) || isa<PHINode>(V))
8280 return;
8281
8282 // Otherwise we give up.
8283 indicatePessimisticFixpoint();
8284
8285 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8286 << getAssociatedValue() << "\n");
8287 }
8288
8289 bool calculateBinaryOperator(
8290 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8291 const Instruction *CtxI,
8292 SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8293 Value *LHS = BinOp->getOperand(0);
8294 Value *RHS = BinOp->getOperand(1);
8295
8296 // Simplify the operands first.
8297 bool UsedAssumedInformation = false;
8298 const auto &SimplifiedLHS =
8299 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8300 *this, UsedAssumedInformation);
8301 if (!SimplifiedLHS.hasValue())
8302 return true;
8303 if (!SimplifiedLHS.getValue())
8304 return false;
8305 LHS = *SimplifiedLHS;
8306
8307 const auto &SimplifiedRHS =
8308 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8309 *this, UsedAssumedInformation);
8310 if (!SimplifiedRHS.hasValue())
8311 return true;
8312 if (!SimplifiedRHS.getValue())
8313 return false;
8314 RHS = *SimplifiedRHS;
8315
8316 // TODO: Allow non integers as well.
8317 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8318 return false;
8319
8320 auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8321 *this, IRPosition::value(*LHS, getCallBaseContext()),
8322 DepClassTy::REQUIRED);
8323 QueriedAAs.push_back(&LHSAA);
8324 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8325
8326 auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8327 *this, IRPosition::value(*RHS, getCallBaseContext()),
8328 DepClassTy::REQUIRED);
8329 QueriedAAs.push_back(&RHSAA);
8330 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8331
8332 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8333
8334 T.unionAssumed(AssumedRange);
8335
8336 // TODO: Track a known state too.
8337
8338 return T.isValidState();
8339 }
8340
8341 bool calculateCastInst(
8342 Attributor &A, CastInst *CastI, IntegerRangeState &T,
8343 const Instruction *CtxI,
8344 SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8345 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8346 // TODO: Allow non integers as well.
8347 Value *OpV = CastI->getOperand(0);
8348
8349 // Simplify the operand first.
8350 bool UsedAssumedInformation = false;
8351 const auto &SimplifiedOpV =
8352 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8353 *this, UsedAssumedInformation);
8354 if (!SimplifiedOpV.hasValue())
8355 return true;
8356 if (!SimplifiedOpV.getValue())
8357 return false;
8358 OpV = *SimplifiedOpV;
8359
8360 if (!OpV->getType()->isIntegerTy())
8361 return false;
8362
8363 auto &OpAA = A.getAAFor<AAValueConstantRange>(
8364 *this, IRPosition::value(*OpV, getCallBaseContext()),
8365 DepClassTy::REQUIRED);
8366 QueriedAAs.push_back(&OpAA);
8367 T.unionAssumed(
8368 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8369 return T.isValidState();
8370 }
8371
8372 bool
8373 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8374 const Instruction *CtxI,
8375 SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
8376 Value *LHS = CmpI->getOperand(0);
8377 Value *RHS = CmpI->getOperand(1);
8378
8379 // Simplify the operands first.
8380 bool UsedAssumedInformation = false;
8381 const auto &SimplifiedLHS =
8382 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8383 *this, UsedAssumedInformation);
8384 if (!SimplifiedLHS.hasValue())
8385 return true;
8386 if (!SimplifiedLHS.getValue())
8387 return false;
8388 LHS = *SimplifiedLHS;
8389
8390 const auto &SimplifiedRHS =
8391 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8392 *this, UsedAssumedInformation);
8393 if (!SimplifiedRHS.hasValue())
8394 return true;
8395 if (!SimplifiedRHS.getValue())
8396 return false;
8397 RHS = *SimplifiedRHS;
8398
8399 // TODO: Allow non integers as well.
8400 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8401 return false;
8402
8403 auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8404 *this, IRPosition::value(*LHS, getCallBaseContext()),
8405 DepClassTy::REQUIRED);
8406 QueriedAAs.push_back(&LHSAA);
8407 auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8408 *this, IRPosition::value(*RHS, getCallBaseContext()),
8409 DepClassTy::REQUIRED);
8410 QueriedAAs.push_back(&RHSAA);
8411 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8412 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8413
8414 // If one of them is an empty set, we cannot decide.
8415 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8416 return true;
8417
8418 bool MustTrue = false, MustFalse = false;
8419
8420 auto AllowedRegion =
8421 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8422
8423 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8424 MustFalse = true;
8425
8426 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8427 MustTrue = true;
8428
8429 assert((!MustTrue || !MustFalse) &&
8430 "Either MustTrue or MustFalse should be false!");
8431
8432 if (MustTrue)
8433 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8434 else if (MustFalse)
8435 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8436 else
8437 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8438
8439 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8440 << " " << RHSAA << "\n");
8441
8442 // TODO: Track a known state too.
8443 return T.isValidState();
8444 }
8445
8446 /// See AbstractAttribute::updateImpl(...).
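/// The update visits the (simplified) operands via genericValueTraversal
/// and merges their ranges through the calculate* helpers above; querying
/// ourselves without making progress is caught below and treated as a
/// pessimistic fixpoint to avoid circular reasoning.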
8447 ChangeStatus updateImpl(Attributor &A) override {
8448 auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8449 IntegerRangeState &T, bool Stripped) -> bool {
8450 Instruction *I = dyn_cast<Instruction>(&V);
8451 if (!I || isa<CallBase>(I)) {
8452
8453 // Simplify the operand first.
8454 bool UsedAssumedInformation = false;
8455 const auto &SimplifiedOpV =
8456 A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8457 *this, UsedAssumedInformation);
8458 if (!SimplifiedOpV.hasValue())
8459 return true;
8460 if (!SimplifiedOpV.getValue())
8461 return false;
8462 Value *VPtr = *SimplifiedOpV;
8463
8464 // If the value is not an instruction, we query the AA via the Attributor.
8465 const auto &AA = A.getAAFor<AAValueConstantRange>(
8466 *this, IRPosition::value(*VPtr, getCallBaseContext()),
8467 DepClassTy::REQUIRED);
8468
8469 // The clamp operator is not used so we can utilize the program point CtxI.
8470 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8471
8472 return T.isValidState();
8473 }
8474
8475 SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
8476 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8477 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
8478 return false;
8479 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8480 if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
8481 return false;
8482 } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8483 if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
8484 return false;
8485 } else {
8486 // Give up with other instructions.
8487 // TODO: Add other instructions
8488
8489 T.indicatePessimisticFixpoint();
8490 return false;
8491 }
8492
8493 // Catch circular reasoning in a pessimistic way for now.
8494 // TODO: Check how the range evolves and if we stripped anything, see also
8495 // AADereferenceable or AAAlign for similar situations.
8496 for (const AAValueConstantRange *QueriedAA : QueriedAAs) {
8497 if (QueriedAA != this)
8498 continue;
8499 // If we are in a steady state we do not need to worry.
8500 if (T.getAssumed() == getState().getAssumed())
8501 continue;
8502 T.indicatePessimisticFixpoint();
8503 }
8504
8505 return T.isValidState();
8506 };
8507
8508 IntegerRangeState T(getBitWidth());
8509
8510 if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8511 VisitValueCB, getCtxI(),
8512 /* UseValueSimplify */ false))
8513 return indicatePessimisticFixpoint();
8514
8515 return clampStateAndIndicateChange(getState(), T);
8516 }
8517
8518 /// See AbstractAttribute::trackStatistics()
8519 void trackStatistics() const override {
8520 STATS_DECLTRACK_FLOATING_ATTR(value_range)
8521 }
8522 };
8523
8524 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8525 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8526 : AAValueConstantRangeImpl(IRP, A) {}
8527
8528 /// See AbstractAttribute::updateImpl(...).
8529 ChangeStatus updateImpl(Attributor &A) override {
8530 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8531 "not be called");
8532 }
8533
8534 /// See AbstractAttribute::trackStatistics()
8535 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8536 };
8537
8538 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8539 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8540 : AAValueConstantRangeFunction(IRP, A) {}
8541
8542 /// See AbstractAttribute::trackStatistics()
8543 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8544 };
8545
8546 struct AAValueConstantRangeCallSiteReturned
8547 : AACallSiteReturnedFromReturned<AAValueConstantRange,
8548 AAValueConstantRangeImpl,
8549 AAValueConstantRangeImpl::StateType,
8550 /* IntroduceCallBaseContext */ true> {
8551 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8552 : AACallSiteReturnedFromReturned<AAValueConstantRange,
8553 AAValueConstantRangeImpl,
8554 AAValueConstantRangeImpl::StateType,
8555 /* IntroduceCallBaseContext */ true>(IRP,
8556 A) {
8557 }
8558
8559 /// See AbstractAttribute::initialize(...).
8560 void initialize(Attributor &A) override {
8561 // If it is a call instruction with range metadata, use the metadata.
8562 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8563 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8564 intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8565
8566 AAValueConstantRangeImpl::initialize(A);
8567 }
8568
8569 /// See AbstractAttribute::trackStatistics()
8570 void trackStatistics() const override {
8571 STATS_DECLTRACK_CSRET_ATTR(value_range)
8572 }
8573 };
8574 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8575 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8576 : AAValueConstantRangeFloating(IRP, A) {}
8577
8578 /// See AbstractAttribute::manifest()
8579 ChangeStatus manifest(Attributor &A) override {
8580 return ChangeStatus::UNCHANGED;
8581 }
8582
8583 /// See AbstractAttribute::trackStatistics()
8584 void trackStatistics() const override {
8585 STATS_DECLTRACK_CSARG_ATTR(value_range)
8586 }
8587 };
8588
8589 /// ------------------ Potential Values Attribute -------------------------
8590
8591 struct AAPotentialValuesImpl : AAPotentialValues {
8592 using StateType = PotentialConstantIntValuesState;
8593
8594 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8595 : AAPotentialValues(IRP, A) {}
8596
8597 /// See AbstractAttribute::initialize(..).
8598 void initialize(Attributor &A) override {
8599 if (A.hasSimplificationCallback(getIRPosition()))
8600 indicatePessimisticFixpoint();
8601 else
8602 AAPotentialValues::initialize(A);
8603 }
8604
8605 /// See AbstractAttribute::getAsStr().
8606 const std::string getAsStr() const override {
8607 std::string Str;
8608 llvm::raw_string_ostream OS(Str);
8609 OS << getState();
8610 return OS.str();
8611 }
8612
8613 /// See AbstractAttribute::updateImpl(...).
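/// The base implementation below simply gives up (pessimistic fixpoint);
/// the position-specific subclasses provide the real update logic.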
8614 ChangeStatus updateImpl(Attributor &A) override {
8615 return indicatePessimisticFixpoint();
8616 }
8617 };
8618
8619 struct AAPotentialValuesArgument final
8620 : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8621 PotentialConstantIntValuesState> {
8622 using Base =
8623 AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8624 PotentialConstantIntValuesState>;
8625 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8626 : Base(IRP, A) {}
8627
8628 /// See AbstractAttribute::initialize(..).
8629 void initialize(Attributor &A) override {
8630 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8631 indicatePessimisticFixpoint();
8632 } else {
8633 Base::initialize(A);
8634 }
8635 }
8636
8637 /// See AbstractAttribute::trackStatistics()
8638 void trackStatistics() const override {
8639 STATS_DECLTRACK_ARG_ATTR(potential_values)
8640 }
8641 };
8642
8643 struct AAPotentialValuesReturned
8644 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8645 using Base =
8646 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8647 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8648 : Base(IRP, A) {}
8649
8650 /// See AbstractAttribute::trackStatistics()
8651 void trackStatistics() const override {
8652 STATS_DECLTRACK_FNRET_ATTR(potential_values)
8653 }
8654 };
8655
8656 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8657 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8658 : AAPotentialValuesImpl(IRP, A) {}
8659
8660 /// See AbstractAttribute::initialize(..).
8661 void initialize(Attributor &A) override {
8662 AAPotentialValuesImpl::initialize(A);
8663 if (isAtFixpoint())
8664 return;
8665
8666 Value &V = getAssociatedValue();
8667
8668 if (auto *C = dyn_cast<ConstantInt>(&V)) {
8669 unionAssumed(C->getValue());
8670 indicateOptimisticFixpoint();
8671 return;
8672 }
8673
8674 if (isa<UndefValue>(&V)) {
8675 unionAssumedWithUndef();
8676 indicateOptimisticFixpoint();
8677 return;
8678 }
8679
8680 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8681 return;
8682
8683 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8684 return;
8685
8686 indicatePessimisticFixpoint();
8687
8688 LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8689 << getAssociatedValue() << "\n");
8690 }
8691
8692 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8693 const APInt &RHS) {
8694 return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8695 }
8696
8697 static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8698 uint32_t ResultBitWidth) {
8699 Instruction::CastOps CastOp = CI->getOpcode();
8700 switch (CastOp) {
8701 default:
8702 llvm_unreachable("unsupported or not integer cast");
8703 case Instruction::Trunc:
8704 return Src.trunc(ResultBitWidth);
8705 case Instruction::SExt:
8706 return Src.sext(ResultBitWidth);
8707 case Instruction::ZExt:
8708 return Src.zext(ResultBitWidth);
8709 case Instruction::BitCast:
8710 return Src;
8711 }
8712 }
8713
8714 static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8715 const APInt &LHS, const APInt &RHS,
8716 bool &SkipOperation, bool &Unsupported) {
8717 Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8718 // Unsupported is set to true when the binary operator is not supported.
8719 // SkipOperation is set to true when UB occurs with the given operand pair
8720 // (LHS, RHS).
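// Worked example (values assumed for illustration): for 'udiv %x, %y' with
// a potential RHS value of 0, SkipOperation is set and that (LHS, RHS) pair
// contributes nothing; any opcode not listed below sets Unsupported instead.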
8721 // TODO: we should look at nsw and nuw keywords to handle operations 8722 // that create poison or undef value. 8723 switch (BinOpcode) { 8724 default: 8725 Unsupported = true; 8726 return LHS; 8727 case Instruction::Add: 8728 return LHS + RHS; 8729 case Instruction::Sub: 8730 return LHS - RHS; 8731 case Instruction::Mul: 8732 return LHS * RHS; 8733 case Instruction::UDiv: 8734 if (RHS.isZero()) { 8735 SkipOperation = true; 8736 return LHS; 8737 } 8738 return LHS.udiv(RHS); 8739 case Instruction::SDiv: 8740 if (RHS.isZero()) { 8741 SkipOperation = true; 8742 return LHS; 8743 } 8744 return LHS.sdiv(RHS); 8745 case Instruction::URem: 8746 if (RHS.isZero()) { 8747 SkipOperation = true; 8748 return LHS; 8749 } 8750 return LHS.urem(RHS); 8751 case Instruction::SRem: 8752 if (RHS.isZero()) { 8753 SkipOperation = true; 8754 return LHS; 8755 } 8756 return LHS.srem(RHS); 8757 case Instruction::Shl: 8758 return LHS.shl(RHS); 8759 case Instruction::LShr: 8760 return LHS.lshr(RHS); 8761 case Instruction::AShr: 8762 return LHS.ashr(RHS); 8763 case Instruction::And: 8764 return LHS & RHS; 8765 case Instruction::Or: 8766 return LHS | RHS; 8767 case Instruction::Xor: 8768 return LHS ^ RHS; 8769 } 8770 } 8771 8772 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8773 const APInt &LHS, const APInt &RHS) { 8774 bool SkipOperation = false; 8775 bool Unsupported = false; 8776 APInt Result = 8777 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8778 if (Unsupported) 8779 return false; 8780 // If SkipOperation is true, we can ignore this operand pair (L, R). 8781 if (!SkipOperation) 8782 unionAssumed(Result); 8783 return isValidState(); 8784 } 8785 8786 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8787 auto AssumedBefore = getAssumed(); 8788 Value *LHS = ICI->getOperand(0); 8789 Value *RHS = ICI->getOperand(1); 8790 8791 // Simplify the operands first. 8792 bool UsedAssumedInformation = false; 8793 const auto &SimplifiedLHS = 8794 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8795 *this, UsedAssumedInformation); 8796 if (!SimplifiedLHS.hasValue()) 8797 return ChangeStatus::UNCHANGED; 8798 if (!SimplifiedLHS.getValue()) 8799 return indicatePessimisticFixpoint(); 8800 LHS = *SimplifiedLHS; 8801 8802 const auto &SimplifiedRHS = 8803 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8804 *this, UsedAssumedInformation); 8805 if (!SimplifiedRHS.hasValue()) 8806 return ChangeStatus::UNCHANGED; 8807 if (!SimplifiedRHS.getValue()) 8808 return indicatePessimisticFixpoint(); 8809 RHS = *SimplifiedRHS; 8810 8811 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8812 return indicatePessimisticFixpoint(); 8813 8814 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8815 DepClassTy::REQUIRED); 8816 if (!LHSAA.isValidState()) 8817 return indicatePessimisticFixpoint(); 8818 8819 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8820 DepClassTy::REQUIRED); 8821 if (!RHSAA.isValidState()) 8822 return indicatePessimisticFixpoint(); 8823 8824 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 8825 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 8826 8827 // TODO: make use of undef flag to limit potential values aggressively. 
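// Worked example (assumed sets): for 'icmp eq' with LHS in {0, 1} and RHS
// in {1}, the pair (1, 1) makes MaybeTrue true and (0, 1) makes MaybeFalse
// true, so both results are possible and we give up pessimistically below.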
8828 bool MaybeTrue = false, MaybeFalse = false; 8829 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8830 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 8831 // The result of any comparison between undefs can be soundly replaced 8832 // with undef. 8833 unionAssumedWithUndef(); 8834 } else if (LHSAA.undefIsContained()) { 8835 for (const APInt &R : RHSAAPVS) { 8836 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8837 MaybeTrue |= CmpResult; 8838 MaybeFalse |= !CmpResult; 8839 if (MaybeTrue & MaybeFalse) 8840 return indicatePessimisticFixpoint(); 8841 } 8842 } else if (RHSAA.undefIsContained()) { 8843 for (const APInt &L : LHSAAPVS) { 8844 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8845 MaybeTrue |= CmpResult; 8846 MaybeFalse |= !CmpResult; 8847 if (MaybeTrue & MaybeFalse) 8848 return indicatePessimisticFixpoint(); 8849 } 8850 } else { 8851 for (const APInt &L : LHSAAPVS) { 8852 for (const APInt &R : RHSAAPVS) { 8853 bool CmpResult = calculateICmpInst(ICI, L, R); 8854 MaybeTrue |= CmpResult; 8855 MaybeFalse |= !CmpResult; 8856 if (MaybeTrue & MaybeFalse) 8857 return indicatePessimisticFixpoint(); 8858 } 8859 } 8860 } 8861 if (MaybeTrue) 8862 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8863 if (MaybeFalse) 8864 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8865 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8866 : ChangeStatus::CHANGED; 8867 } 8868 8869 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8870 auto AssumedBefore = getAssumed(); 8871 Value *LHS = SI->getTrueValue(); 8872 Value *RHS = SI->getFalseValue(); 8873 8874 // Simplify the operands first. 8875 bool UsedAssumedInformation = false; 8876 const auto &SimplifiedLHS = 8877 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8878 *this, UsedAssumedInformation); 8879 if (!SimplifiedLHS.hasValue()) 8880 return ChangeStatus::UNCHANGED; 8881 if (!SimplifiedLHS.getValue()) 8882 return indicatePessimisticFixpoint(); 8883 LHS = *SimplifiedLHS; 8884 8885 const auto &SimplifiedRHS = 8886 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8887 *this, UsedAssumedInformation); 8888 if (!SimplifiedRHS.hasValue()) 8889 return ChangeStatus::UNCHANGED; 8890 if (!SimplifiedRHS.getValue()) 8891 return indicatePessimisticFixpoint(); 8892 RHS = *SimplifiedRHS; 8893 8894 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8895 return indicatePessimisticFixpoint(); 8896 8897 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 8898 UsedAssumedInformation); 8899 8900 // Check if we only need one operand. 8901 bool OnlyLeft = false, OnlyRight = false; 8902 if (C.hasValue() && *C && (*C)->isOneValue()) 8903 OnlyLeft = true; 8904 else if (C.hasValue() && *C && (*C)->isZeroValue()) 8905 OnlyRight = true; 8906 8907 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 8908 if (!OnlyRight) { 8909 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8910 DepClassTy::REQUIRED); 8911 if (!LHSAA->isValidState()) 8912 return indicatePessimisticFixpoint(); 8913 } 8914 if (!OnlyLeft) { 8915 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8916 DepClassTy::REQUIRED); 8917 if (!RHSAA->isValidState()) 8918 return indicatePessimisticFixpoint(); 8919 } 8920 8921 if (!LHSAA || !RHSAA) { 8922 // select (true/false), lhs, rhs 8923 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 8924 8925 if (OpAA->undefIsContained()) 8926 unionAssumedWithUndef(); 8927 else 8928 unionAssumed(*OpAA); 8929 8930 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 8931 // select i1 *, undef , undef => undef 8932 unionAssumedWithUndef(); 8933 } else { 8934 unionAssumed(*LHSAA); 8935 unionAssumed(*RHSAA); 8936 } 8937 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8938 : ChangeStatus::CHANGED; 8939 } 8940 8941 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 8942 auto AssumedBefore = getAssumed(); 8943 if (!CI->isIntegerCast()) 8944 return indicatePessimisticFixpoint(); 8945 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 8946 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 8947 Value *Src = CI->getOperand(0); 8948 8949 // Simplify the operand first. 8950 bool UsedAssumedInformation = false; 8951 const auto &SimplifiedSrc = 8952 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 8953 *this, UsedAssumedInformation); 8954 if (!SimplifiedSrc.hasValue()) 8955 return ChangeStatus::UNCHANGED; 8956 if (!SimplifiedSrc.getValue()) 8957 return indicatePessimisticFixpoint(); 8958 Src = *SimplifiedSrc; 8959 8960 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 8961 DepClassTy::REQUIRED); 8962 if (!SrcAA.isValidState()) 8963 return indicatePessimisticFixpoint(); 8964 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 8965 if (SrcAA.undefIsContained()) 8966 unionAssumedWithUndef(); 8967 else { 8968 for (const APInt &S : SrcAAPVS) { 8969 APInt T = calculateCastInst(CI, S, ResultBitWidth); 8970 unionAssumed(T); 8971 } 8972 } 8973 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8974 : ChangeStatus::CHANGED; 8975 } 8976 8977 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 8978 auto AssumedBefore = getAssumed(); 8979 Value *LHS = BinOp->getOperand(0); 8980 Value *RHS = BinOp->getOperand(1); 8981 8982 // Simplify the operands first. 8983 bool UsedAssumedInformation = false; 8984 const auto &SimplifiedLHS = 8985 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8986 *this, UsedAssumedInformation); 8987 if (!SimplifiedLHS.hasValue()) 8988 return ChangeStatus::UNCHANGED; 8989 if (!SimplifiedLHS.getValue()) 8990 return indicatePessimisticFixpoint(); 8991 LHS = *SimplifiedLHS; 8992 8993 const auto &SimplifiedRHS = 8994 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8995 *this, UsedAssumedInformation); 8996 if (!SimplifiedRHS.hasValue()) 8997 return ChangeStatus::UNCHANGED; 8998 if (!SimplifiedRHS.getValue()) 8999 return indicatePessimisticFixpoint(); 9000 RHS = *SimplifiedRHS; 9001 9002 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 9003 return indicatePessimisticFixpoint(); 9004 9005 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 9006 DepClassTy::REQUIRED); 9007 if (!LHSAA.isValidState()) 9008 return indicatePessimisticFixpoint(); 9009 9010 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9011 DepClassTy::REQUIRED); 9012 if (!RHSAA.isValidState()) 9013 return indicatePessimisticFixpoint(); 9014 9015 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9016 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9017 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9018 9019 // TODO: make use of undef flag to limit potential values aggressively. 
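// Worked example (assumed sets): for 'add' with LHS in {2, 3} and RHS in
// {4}, the assumed set becomes {6, 7}; an undef operand is conservatively
// folded in as 0 via the Zero constant below.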
9020 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9021 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9022 return indicatePessimisticFixpoint(); 9023 } else if (LHSAA.undefIsContained()) { 9024 for (const APInt &R : RHSAAPVS) { 9025 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9026 return indicatePessimisticFixpoint(); 9027 } 9028 } else if (RHSAA.undefIsContained()) { 9029 for (const APInt &L : LHSAAPVS) { 9030 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9031 return indicatePessimisticFixpoint(); 9032 } 9033 } else { 9034 for (const APInt &L : LHSAAPVS) { 9035 for (const APInt &R : RHSAAPVS) { 9036 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9037 return indicatePessimisticFixpoint(); 9038 } 9039 } 9040 } 9041 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9042 : ChangeStatus::CHANGED; 9043 } 9044 9045 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9046 auto AssumedBefore = getAssumed(); 9047 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9048 Value *IncomingValue = PHI->getIncomingValue(u); 9049 9050 // Simplify the operand first. 9051 bool UsedAssumedInformation = false; 9052 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9053 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9054 UsedAssumedInformation); 9055 if (!SimplifiedIncomingValue.hasValue()) 9056 continue; 9057 if (!SimplifiedIncomingValue.getValue()) 9058 return indicatePessimisticFixpoint(); 9059 IncomingValue = *SimplifiedIncomingValue; 9060 9061 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9062 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9063 if (!PotentialValuesAA.isValidState()) 9064 return indicatePessimisticFixpoint(); 9065 if (PotentialValuesAA.undefIsContained()) 9066 unionAssumedWithUndef(); 9067 else 9068 unionAssumed(PotentialValuesAA.getAssumed()); 9069 } 9070 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9071 : ChangeStatus::CHANGED; 9072 } 9073 9074 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9075 if (!L.getType()->isIntegerTy()) 9076 return indicatePessimisticFixpoint(); 9077 9078 auto Union = [&](Value &V) { 9079 if (isa<UndefValue>(V)) { 9080 unionAssumedWithUndef(); 9081 return true; 9082 } 9083 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9084 unionAssumed(CI->getValue()); 9085 return true; 9086 } 9087 return false; 9088 }; 9089 auto AssumedBefore = getAssumed(); 9090 9091 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9092 return indicatePessimisticFixpoint(); 9093 9094 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9095 : ChangeStatus::CHANGED; 9096 } 9097 9098 /// See AbstractAttribute::updateImpl(...). 
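/// Dispatches to the updateWith* helpers above based on the instruction
/// kind; any other instruction results in a pessimistic fixpoint.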
9099 ChangeStatus updateImpl(Attributor &A) override {
9100 Value &V = getAssociatedValue();
9101 Instruction *I = dyn_cast<Instruction>(&V);
9102
9103 if (auto *ICI = dyn_cast<ICmpInst>(I))
9104 return updateWithICmpInst(A, ICI);
9105
9106 if (auto *SI = dyn_cast<SelectInst>(I))
9107 return updateWithSelectInst(A, SI);
9108
9109 if (auto *CI = dyn_cast<CastInst>(I))
9110 return updateWithCastInst(A, CI);
9111
9112 if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9113 return updateWithBinaryOperator(A, BinOp);
9114
9115 if (auto *PHI = dyn_cast<PHINode>(I))
9116 return updateWithPHINode(A, PHI);
9117
9118 if (auto *L = dyn_cast<LoadInst>(I))
9119 return updateWithLoad(A, *L);
9120
9121 return indicatePessimisticFixpoint();
9122 }
9123
9124 /// See AbstractAttribute::trackStatistics()
9125 void trackStatistics() const override {
9126 STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9127 }
9128 };
9129
9130 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9131 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9132 : AAPotentialValuesImpl(IRP, A) {}
9133
9134 /// See AbstractAttribute::updateImpl(...).
9135 ChangeStatus updateImpl(Attributor &A) override {
9136 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9137 "not be called");
9138 }
9139
9140 /// See AbstractAttribute::trackStatistics()
9141 void trackStatistics() const override {
9142 STATS_DECLTRACK_FN_ATTR(potential_values)
9143 }
9144 };
9145
9146 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9147 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9148 : AAPotentialValuesFunction(IRP, A) {}
9149
9150 /// See AbstractAttribute::trackStatistics()
9151 void trackStatistics() const override {
9152 STATS_DECLTRACK_CS_ATTR(potential_values)
9153 }
9154 };
9155
9156 struct AAPotentialValuesCallSiteReturned
9157 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9158 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9159 : AACallSiteReturnedFromReturned<AAPotentialValues,
9160 AAPotentialValuesImpl>(IRP, A) {}
9161
9162 /// See AbstractAttribute::trackStatistics()
9163 void trackStatistics() const override {
9164 STATS_DECLTRACK_CSRET_ATTR(potential_values)
9165 }
9166 };
9167
9168 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9169 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9170 : AAPotentialValuesFloating(IRP, A) {}
9171
9172 /// See AbstractAttribute::initialize(..).
9173 void initialize(Attributor &A) override {
9174 AAPotentialValuesImpl::initialize(A);
9175 if (isAtFixpoint())
9176 return;
9177
9178 Value &V = getAssociatedValue();
9179
9180 if (auto *C = dyn_cast<ConstantInt>(&V)) {
9181 unionAssumed(C->getValue());
9182 indicateOptimisticFixpoint();
9183 return;
9184 }
9185
9186 if (isa<UndefValue>(&V)) {
9187 unionAssumedWithUndef();
9188 indicateOptimisticFixpoint();
9189 return;
9190 }
9191 }
9192
9193 /// See AbstractAttribute::updateImpl(...).
9194 ChangeStatus updateImpl(Attributor &A) override {
9195 Value &V = getAssociatedValue();
9196 auto AssumedBefore = getAssumed();
9197 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9198 DepClassTy::REQUIRED);
9199 const auto &S = AA.getAssumed();
9200 unionAssumed(S);
9201 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9202 : ChangeStatus::CHANGED;
9203 }
9204
9205 /// See AbstractAttribute::trackStatistics()
9206 void trackStatistics() const override {
9207 STATS_DECLTRACK_CSARG_ATTR(potential_values)
9208 }
9209 };
9210
9211 /// ------------------------ NoUndef Attribute ---------------------------------
9212 struct AANoUndefImpl : AANoUndef {
9213 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9214
9215 /// See AbstractAttribute::initialize(...).
9216 void initialize(Attributor &A) override {
9217 if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9218 indicateOptimisticFixpoint();
9219 return;
9220 }
9221 Value &V = getAssociatedValue();
9222 if (isa<UndefValue>(V))
9223 indicatePessimisticFixpoint();
9224 else if (isa<FreezeInst>(V))
9225 indicateOptimisticFixpoint();
9226 else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9227 isGuaranteedNotToBeUndefOrPoison(&V))
9228 indicateOptimisticFixpoint();
9229 else
9230 AANoUndef::initialize(A);
9231 }
9232
9233 /// See followUsesInMBEC
9234 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9235 AANoUndef::StateType &State) {
9236 const Value *UseV = U->get();
9237 const DominatorTree *DT = nullptr;
9238 AssumptionCache *AC = nullptr;
9239 InformationCache &InfoCache = A.getInfoCache();
9240 if (Function *F = getAnchorScope()) {
9241 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9242 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9243 }
9244 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9245 bool TrackUse = false;
9246 // Track use for instructions which must produce undef or poison bits when
9247 // at least one operand contains such bits.
9248 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9249 TrackUse = true;
9250 return TrackUse;
9251 }
9252
9253 /// See AbstractAttribute::getAsStr().
9254 const std::string getAsStr() const override {
9255 return getAssumed() ? "noundef" : "may-undef-or-poison";
9256 }
9257
9258 ChangeStatus manifest(Attributor &A) override {
9259 // We don't manifest the noundef attribute for dead positions because the
9260 // associated values with dead positions would be replaced with undef
9261 // values.
9262 bool UsedAssumedInformation = false;
9263 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9264 UsedAssumedInformation))
9265 return ChangeStatus::UNCHANGED;
9266 // A position whose simplified value does not have any value is
9267 // considered to be dead. We don't manifest noundef in such positions for
9268 // the same reason as above.
9269 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9270 .hasValue())
9271 return ChangeStatus::UNCHANGED;
9272 return AANoUndef::manifest(A);
9273 }
9274 };
9275
9276 struct AANoUndefFloating : public AANoUndefImpl {
9277 AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9278 : AANoUndefImpl(IRP, A) {}
9279
9280 /// See AbstractAttribute::initialize(...).
9281 void initialize(Attributor &A) override {
9282 AANoUndefImpl::initialize(A);
9283 if (!getState().isAtFixpoint())
9284 if (Instruction *CtxI = getCtxI())
9285 followUsesInMBEC(*this, A, getState(), *CtxI);
9286 }
9287
9288 /// See AbstractAttribute::updateImpl(...).
9289 ChangeStatus updateImpl(Attributor &A) override { 9290 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 9291 AANoUndef::StateType &T, bool Stripped) -> bool { 9292 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), 9293 DepClassTy::REQUIRED); 9294 if (!Stripped && this == &AA) { 9295 T.indicatePessimisticFixpoint(); 9296 } else { 9297 const AANoUndef::StateType &S = 9298 static_cast<const AANoUndef::StateType &>(AA.getState()); 9299 T ^= S; 9300 } 9301 return T.isValidState(); 9302 }; 9303 9304 StateType T; 9305 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 9306 VisitValueCB, getCtxI())) 9307 return indicatePessimisticFixpoint(); 9308 9309 return clampStateAndIndicateChange(getState(), T); 9310 } 9311 9312 /// See AbstractAttribute::trackStatistics() 9313 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9314 }; 9315 9316 struct AANoUndefReturned final 9317 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { 9318 AANoUndefReturned(const IRPosition &IRP, Attributor &A) 9319 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} 9320 9321 /// See AbstractAttribute::trackStatistics() 9322 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9323 }; 9324 9325 struct AANoUndefArgument final 9326 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { 9327 AANoUndefArgument(const IRPosition &IRP, Attributor &A) 9328 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} 9329 9330 /// See AbstractAttribute::trackStatistics() 9331 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } 9332 }; 9333 9334 struct AANoUndefCallSiteArgument final : AANoUndefFloating { 9335 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) 9336 : AANoUndefFloating(IRP, A) {} 9337 9338 /// See AbstractAttribute::trackStatistics() 9339 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } 9340 }; 9341 9342 struct AANoUndefCallSiteReturned final 9343 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> { 9344 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) 9345 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {} 9346 9347 /// See AbstractAttribute::trackStatistics() 9348 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } 9349 }; 9350 9351 struct AACallEdgesImpl : public AACallEdges { 9352 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} 9353 9354 virtual const SetVector<Function *> &getOptimisticEdges() const override { 9355 return CalledFunctions; 9356 } 9357 9358 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; } 9359 9360 virtual bool hasNonAsmUnknownCallee() const override { 9361 return HasUnknownCalleeNonAsm; 9362 } 9363 9364 const std::string getAsStr() const override { 9365 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + 9366 std::to_string(CalledFunctions.size()) + "]"; 9367 } 9368 9369 void trackStatistics() const override {} 9370 9371 protected: 9372 void addCalledFunction(Function *Fn, ChangeStatus &Change) { 9373 if (CalledFunctions.insert(Fn)) { 9374 Change = ChangeStatus::CHANGED; 9375 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() 9376 << "\n"); 9377 } 9378 } 9379 9380 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { 9381 if (!HasUnknownCallee) 9382 Change = ChangeStatus::CHANGED; 9383 if (NonAsm && !HasUnknownCalleeNonAsm) 9384 Change 

struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  virtual const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }

  virtual bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee?
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm?
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       false)) {
        // If we haven't gone through all values, assume that there are unknown
        // callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = static_cast<CallBase *>(getCtxI());

    if (CB->isInlineAsm()) {
      setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: the called operand is (or simplifies to) a function.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};
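
// Illustrative example (a sketch): an indirect call site annotated with
// callees metadata,
//
//   call void %fp(), !callees !0
//   !0 = !{void ()* @f, void ()* @g}
//
// yields exactly the optimistic edges {@f, @g} in AACallEdgesCallSite above,
// without marking the call site as having an unknown callee.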

struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = static_cast<CallBase &>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};
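
// Illustrative example (a sketch): for
//
//   define void @f(void ()* %fp) {
//     call void @g()
//     call void %fp()
//     ret void
//   }
//
// the function-level edges of @f are the union of the per-call-site edges,
// here {@g}; the unresolved indirect call through %fp additionally marks @f
// as having an unknown (non-asm) callee.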

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(Function *Fn) {
      Reachable.insert(Fn);
      Unreachable.erase(Fn);
    }

    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, const AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(Fn))
        return true;

      if (Unreachable.count(Fn))
        return false;

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          Function *Fn) const {

      // Handle the most trivial case first.
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(Fn))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto &AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (auto Dep : Deps)
        A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);

      return false;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we assume
    /// that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, Function *Fn) const override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result =
        NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QuerySet &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    // Note: iterate by reference; copying the pair would update a temporary
    // QuerySet and discard the cached results.
    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QuerySet WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  DenseMap<CallBase *, QuerySet> CBQueries;
};
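
// Illustrative query (a sketch): for a call chain @a -> @b -> @c, calling
// canReach(A, @c) on the AAFunctionReachability of @a walks the optimistic
// call edges through @b, records @c in the Reachable set of the cached
// QuerySet, and answers repeated queries for @c without re-traversal.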

/// ---------------------- Assumption Propagation ------------------------------
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};
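
// Illustrative example (a sketch): a function carrying the assumption string
// attribute, e.g.
//
//   define void @f() "llvm.assume"="check_a,check_b" { ... }
//
// seeds its known set with {check_a, check_b}; hasAssumption("check_a") then
// holds as long as the state is valid.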

/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph, or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool AllCallSitesKnown;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either an
    // entry into the call graph or an empty node. This node is known to only
    // contain its own assumptions and can be propagated to its successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};
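
// Illustrative propagation (a sketch): if @f is reached from two call sites
// whose assumed sets are {check_a, check_b} and {check_a}, the update above
// narrows the assumed set of @f to the intersection {check_a}.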

/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set with all the assumptions this call
  /// and its callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};

} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
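
// Illustrative expansion (a sketch): the invocation
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) below defines
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     // switch on IRP.getPositionKind():
//     //   IRP_FUNCTION  -> new (A.Allocator) AANoUnwindFunction(IRP, A)
//     //   IRP_CALL_SITE -> new (A.Allocator) AANoUnwindCallSite(IRP, A)
//     //   anything else -> llvm_unreachable(...)
//   }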

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV