//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
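//
// As an illustrative sketch (the attribute name and conditions are
// hypothetical), a trackStatistics() with two distinct increment sites
// could look like:
//  void trackStatistics() const override {
//    STATS_DECL(myattr, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, myattr))
//    if (IsKnown)
//      STATS_TRACK(myattr, Arguments)
//    else if (IsAssumed)
//      STATS_TRACK(myattr, Arguments)
//  }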
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm
/// Get pointer operand of memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
/// already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
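
// As a rough example of the above (types and names are hypothetical): for a
// pointer %p with natural type { i32, [4 x i8] } and Offset == 5, the helper
// would emit a GEP along the natural type, e.g.,
//   %p.0.1.1 = getelementptr { i32, [4 x i8] }, { i32, [4 x i8] }* %p,
//              i64 0, i32 1, i64 1
// and would only fall back to an i8-based GEP for a remainder the natural
// type cannot express.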

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA,
        IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
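
// For illustration (hypothetical IR): starting the traversal from
//   %phi = phi i8* [ %a, %bb0 ], [ %sel, %bb1 ]
// with
//   %sel = select i1 %c, i8* %b, i8* %x
// the leaves %a, %b, and %x are reported to VisitValueCB, assuming both
// incoming blocks are live and %c cannot be simplified to a constant.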

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  if (!genericValueTraversal<decltype(Objects)>(
          A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
          true, 32, StripCB))
    return false;
  return true;
}

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all those that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
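
// For example (hypothetical IR): in
//   define i8* @f(i1 %c, i8* nonnull %a, i8* nonnull %b) {
//     br i1 %c, label %t, label %e
//   t:
//     ret i8* %a
//   e:
//     ret i8* %b
//   }
// clamping joins the states of %a and %b, so the returned position of @f can
// be marked nonnull as well; a single unknown return value would pessimize
// the joined state.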

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all those that exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
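
// For example (hypothetical IR): given the two call sites
//   call void @g(i8* nonnull %p)
//   call void @g(i8* %q)
// the call site argument states are joined, so the argument of @g is only
// deduced nonnull if %q is also known (or assumed) to be nonnull.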

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument "
                    << "Position: " << Pos
                    << " CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
///    U - Underlying use.
///    I - The user of the \p U.
///    State - The state to be updated.
///    Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect their known states. The state
  // ParentS_i summarizes the i-th branch instruction in the context as the
  // conjunction of the states ChildS_{i, j} of its successors, and the
  // disjunction of all parent states is merged into the known state:
  //
  //   ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  //   ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //   ...
  //   ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  //   Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in below function.
  //
  //  void f(int a, int b, int *ptr) {
  //    if (a) {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    } else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  //  }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access kind description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if offset or size are unknown.
  bool offsetOrSizeAreUnknown() const {
    return getOffset() == OffsetAndSize::Unknown ||
           getSize() == OffsetAndSize::Unknown;
  }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (offsetOrSizeAreUnknown() || OAS.offsetOrSizeAreUnknown())
      return true;

    // Check if one offset point is in the other half-open interval
    // [offset, offset + size).
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offset or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};
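
// For example, OffsetAndSize(4, 4) covers the bytes [4, 8); it mayOverlap
// OffsetAndSize(6, 4) (bytes [6, 10)) but not OffsetAndSize(8, 4) (bytes
// [8, 12)), and any pair involving an unknown offset or size is
// conservatively treated as overlapping.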

/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};
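
// To sketch the binning (hypothetical accesses): a store of an i32 at
// offset 0 and a load of an i32 at offset 4 end up in the two bins {0, 4}
// and {4, 4}; a second, different store at offset 0 is combined with the
// existing access in bin {0, 4} via Access::operator&=.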

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }
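
  // For example, if the callee recorded an access in bin {4, 4} for its
  // argument and the call site passes a pointer advanced by 8 bytes
  // (CallArgOffset == 8), the access is re-binned at the call site as
  // {12, 4}; an unknown CallArgOffset collapses everything into the
  // unknown bin.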

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };
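
  // For instance, a store of an i32 through a pointer at known offset 8
  // reaches handleAccess with Offset == 8 and no size; the size 4 is then
  // derived from DL.getTypeStoreSize(Ty) since i32 is not scalable.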

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
                                          GEP->getSourceElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the
      // value might change while we iterate through a loop. For now, we give
      // up if the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand has already an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(
                        A, CSArgPI, OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "     --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << "       - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
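
// As a small end-to-end sketch (hypothetical IR):
//   %a = alloca [2 x i32]
//   %g = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i64 1
//   store i32 1, i32* %g
// the floating AAPointerInfo for %a ends up with a single write access in
// bin {4, 4}, derived from the constant GEP offset and the stored type.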

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
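
// For example (hypothetical IR): in
//   define void @f() {
//     call void @g() nounwind
//     ret void
//   }
// the call is the only instruction with a checked opcode and it is known not
// to throw, so @f itself can be deduced nounwind.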

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }
  /// If it is not clear yet, return the Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return values");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
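  // Visit every recorded return value together with the set of return
  // instructions that may produce it; stop as soon as the predicate fails.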
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimpleRetVal =
        A.getAssumedSimplified(V, *this, UsedAssumedInformation);
    if (!SimpleRetVal.hasValue())
      return true;
    if (!SimpleRetVal.getValue())
      return false;
    Value *RetVal = *SimpleRetVal;
    assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[RetVal].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  bool UsedAssumedInformation = false;
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, returns true if an atomic instruction has an
  /// ordering stronger than unordered or monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function specific to intrinsics that are potentially volatile.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg; monotonic is the weakest
    // legal one, so anything stronger than monotonic is non-relaxed.
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    // We are looking for volatile instructions or non-relaxed atomics.

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      if (isNoSyncIntrinsic(&I))
        return true;

      const auto &NoSyncAA = A.getAAFor<AANoSync>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      return NoSyncAA.isAssumedNoSync();
    }

    if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // Non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
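    // Until then, this mirrors the callee, e.g., for (hypothetical IR)
    //   declare void @h() nofree
    //   call void @h()
    // the clamp below meets our state with @h's AANoFree state, so the call
    // site is deduced nofree as well.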
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(nofree) }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
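    // For illustration: if the matching callee parameter has been deduced
    // nofree, clamping against its state below keeps this call site argument
    // nofree as well.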
2041 Argument *Arg = getAssociatedArgument(); 2042 if (!Arg) 2043 return indicatePessimisticFixpoint(); 2044 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2045 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED); 2046 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 2047 } 2048 2049 /// See AbstractAttribute::trackStatistics() 2050 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}; 2051 }; 2052 2053 /// NoFree attribute for function return value. 2054 struct AANoFreeReturned final : AANoFreeFloating { 2055 AANoFreeReturned(const IRPosition &IRP, Attributor &A) 2056 : AANoFreeFloating(IRP, A) { 2057 llvm_unreachable("NoFree is not applicable to function returns!"); 2058 } 2059 2060 /// See AbstractAttribute::initialize(...). 2061 void initialize(Attributor &A) override { 2062 llvm_unreachable("NoFree is not applicable to function returns!"); 2063 } 2064 2065 /// See AbstractAttribute::updateImpl(...). 2066 ChangeStatus updateImpl(Attributor &A) override { 2067 llvm_unreachable("NoFree is not applicable to function returns!"); 2068 } 2069 2070 /// See AbstractAttribute::trackStatistics() 2071 void trackStatistics() const override {} 2072 }; 2073 2074 /// NoFree attribute deduction for a call site return value. 2075 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 2076 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 2077 : AANoFreeFloating(IRP, A) {} 2078 2079 ChangeStatus manifest(Attributor &A) override { 2080 return ChangeStatus::UNCHANGED; 2081 } 2082 /// See AbstractAttribute::trackStatistics() 2083 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 2084 }; 2085 2086 /// ------------------------ NonNull Argument Attribute ------------------------ 2087 static int64_t getKnownNonNullAndDerefBytesForUse( 2088 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 2089 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 2090 TrackUse = false; 2091 2092 const Value *UseV = U->get(); 2093 if (!UseV->getType()->isPointerTy()) 2094 return 0; 2095 2096 // We need to follow common pointer manipulation uses to the accesses they 2097 // feed into. We can try to be smart to avoid looking through things we do not 2098 // like for now, e.g., non-inbounds GEPs. 2099 if (isa<CastInst>(I)) { 2100 TrackUse = true; 2101 return 0; 2102 } 2103 2104 if (isa<GetElementPtrInst>(I)) { 2105 TrackUse = true; 2106 return 0; 2107 } 2108 2109 Type *PtrTy = UseV->getType(); 2110 const Function *F = I->getFunction(); 2111 bool NullPointerIsDefined = 2112 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 2113 const DataLayout &DL = A.getInfoCache().getDL(); 2114 if (const auto *CB = dyn_cast<CallBase>(I)) { 2115 if (CB->isBundleOperand(U)) { 2116 if (RetainedKnowledge RK = getKnowledgeFromUse( 2117 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 2118 IsNonNull |= 2119 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 2120 return RK.ArgValue; 2121 } 2122 return 0; 2123 } 2124 2125 if (CB->isCallee(U)) { 2126 IsNonNull |= !NullPointerIsDefined; 2127 return 0; 2128 } 2129 2130 unsigned ArgNo = CB->getArgOperandNo(U); 2131 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 2132 // As long as we only use known information there is no need to track 2133 // dependences here. 
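    // For example, if the matching callee parameter carries a
    // `dereferenceable(8)` annotation (hypothetical, not from the original),
    // the query below yields 8 known dereferenceable bytes and, when
    // available, known non-nullness.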
2134 auto &DerefAA = 2135 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE); 2136 IsNonNull |= DerefAA.isKnownNonNull(); 2137 return DerefAA.getKnownDereferenceableBytes(); 2138 } 2139 2140 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I); 2141 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) 2142 return 0; 2143 2144 int64_t Offset; 2145 const Value *Base = 2146 getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL); 2147 if (Base && Base == &AssociatedValue) { 2148 int64_t DerefBytes = Loc->Size.getValue() + Offset; 2149 IsNonNull |= !NullPointerIsDefined; 2150 return std::max(int64_t(0), DerefBytes); 2151 } 2152 2153 /// Corner case when an offset is 0. 2154 Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL, 2155 /*AllowNonInbounds*/ true); 2156 if (Base && Base == &AssociatedValue && Offset == 0) { 2157 int64_t DerefBytes = Loc->Size.getValue(); 2158 IsNonNull |= !NullPointerIsDefined; 2159 return std::max(int64_t(0), DerefBytes); 2160 } 2161 2162 return 0; 2163 } 2164 2165 struct AANonNullImpl : AANonNull { 2166 AANonNullImpl(const IRPosition &IRP, Attributor &A) 2167 : AANonNull(IRP, A), 2168 NullIsDefined(NullPointerIsDefined( 2169 getAnchorScope(), 2170 getAssociatedValue().getType()->getPointerAddressSpace())) {} 2171 2172 /// See AbstractAttribute::initialize(...). 2173 void initialize(Attributor &A) override { 2174 Value &V = getAssociatedValue(); 2175 if (!NullIsDefined && 2176 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 2177 /* IgnoreSubsumingPositions */ false, &A)) { 2178 indicateOptimisticFixpoint(); 2179 return; 2180 } 2181 2182 if (isa<ConstantPointerNull>(V)) { 2183 indicatePessimisticFixpoint(); 2184 return; 2185 } 2186 2187 AANonNull::initialize(A); 2188 2189 bool CanBeNull, CanBeFreed; 2190 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull, 2191 CanBeFreed)) { 2192 if (!CanBeNull) { 2193 indicateOptimisticFixpoint(); 2194 return; 2195 } 2196 } 2197 2198 if (isa<GlobalValue>(&getAssociatedValue())) { 2199 indicatePessimisticFixpoint(); 2200 return; 2201 } 2202 2203 if (Instruction *CtxI = getCtxI()) 2204 followUsesInMBEC(*this, A, getState(), *CtxI); 2205 } 2206 2207 /// See followUsesInMBEC 2208 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 2209 AANonNull::StateType &State) { 2210 bool IsNonNull = false; 2211 bool TrackUse = false; 2212 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 2213 IsNonNull, TrackUse); 2214 State.setKnown(IsNonNull); 2215 return TrackUse; 2216 } 2217 2218 /// See AbstractAttribute::getAsStr(). 2219 const std::string getAsStr() const override { 2220 return getAssumed() ? "nonnull" : "may-null"; 2221 } 2222 2223 /// Flag to determine if the underlying value can be null and still allow 2224 /// valid accesses. 2225 const bool NullIsDefined; 2226 }; 2227 2228 /// NonNull attribute for a floating value. 2229 struct AANonNullFloating : public AANonNullImpl { 2230 AANonNullFloating(const IRPosition &IRP, Attributor &A) 2231 : AANonNullImpl(IRP, A) {} 2232 2233 /// See AbstractAttribute::updateImpl(...). 
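  /// A floating nonnull value is deduced via a generic value traversal, e.g.,
  /// for (hypothetical IR) `%p = select i1 %c, i8* %a, i8* %b`, %p can be
  /// assumed nonnull as long as both %a and %b are.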
2234 ChangeStatus updateImpl(Attributor &A) override { 2235 const DataLayout &DL = A.getDataLayout(); 2236 2237 DominatorTree *DT = nullptr; 2238 AssumptionCache *AC = nullptr; 2239 InformationCache &InfoCache = A.getInfoCache(); 2240 if (const Function *Fn = getAnchorScope()) { 2241 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 2242 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 2243 } 2244 2245 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 2246 AANonNull::StateType &T, bool Stripped) -> bool { 2247 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V), 2248 DepClassTy::REQUIRED); 2249 if (!Stripped && this == &AA) { 2250 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 2251 T.indicatePessimisticFixpoint(); 2252 } else { 2253 // Use abstract attribute information. 2254 const AANonNull::StateType &NS = AA.getState(); 2255 T ^= NS; 2256 } 2257 return T.isValidState(); 2258 }; 2259 2260 StateType T; 2261 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 2262 VisitValueCB, getCtxI())) 2263 return indicatePessimisticFixpoint(); 2264 2265 return clampStateAndIndicateChange(getState(), T); 2266 } 2267 2268 /// See AbstractAttribute::trackStatistics() 2269 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2270 }; 2271 2272 /// NonNull attribute for function return value. 2273 struct AANonNullReturned final 2274 : AAReturnedFromReturnedValues<AANonNull, AANonNull> { 2275 AANonNullReturned(const IRPosition &IRP, Attributor &A) 2276 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} 2277 2278 /// See AbstractAttribute::getAsStr(). 2279 const std::string getAsStr() const override { 2280 return getAssumed() ? "nonnull" : "may-null"; 2281 } 2282 2283 /// See AbstractAttribute::trackStatistics() 2284 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2285 }; 2286 2287 /// NonNull attribute for function argument. 2288 struct AANonNullArgument final 2289 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 2290 AANonNullArgument(const IRPosition &IRP, Attributor &A) 2291 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 2292 2293 /// See AbstractAttribute::trackStatistics() 2294 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 2295 }; 2296 2297 struct AANonNullCallSiteArgument final : AANonNullFloating { 2298 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 2299 : AANonNullFloating(IRP, A) {} 2300 2301 /// See AbstractAttribute::trackStatistics() 2302 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 2303 }; 2304 2305 /// NonNull attribute for a call site return position. 2306 struct AANonNullCallSiteReturned final 2307 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 2308 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 2309 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 2310 2311 /// See AbstractAttribute::trackStatistics() 2312 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 2313 }; 2314 2315 /// ------------------------ No-Recurse Attributes ---------------------------- 2316 2317 struct AANoRecurseImpl : public AANoRecurse { 2318 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 2319 2320 /// See AbstractAttribute::getAsStr() 2321 const std::string getAsStr() const override { 2322 return getAssumed() ? 
"norecurse" : "may-recurse"; 2323 } 2324 }; 2325 2326 struct AANoRecurseFunction final : AANoRecurseImpl { 2327 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 2328 : AANoRecurseImpl(IRP, A) {} 2329 2330 /// See AbstractAttribute::initialize(...). 2331 void initialize(Attributor &A) override { 2332 AANoRecurseImpl::initialize(A); 2333 // TODO: We should build a call graph ourselves to enable this in the module 2334 // pass as well. 2335 if (const Function *F = getAnchorScope()) 2336 if (A.getInfoCache().getSccSize(*F) != 1) 2337 indicatePessimisticFixpoint(); 2338 } 2339 2340 /// See AbstractAttribute::updateImpl(...). 2341 ChangeStatus updateImpl(Attributor &A) override { 2342 2343 // If all live call sites are known to be no-recurse, we are as well. 2344 auto CallSitePred = [&](AbstractCallSite ACS) { 2345 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2346 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2347 DepClassTy::NONE); 2348 return NoRecurseAA.isKnownNoRecurse(); 2349 }; 2350 bool AllCallSitesKnown; 2351 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { 2352 // If we know all call sites and all are known no-recurse, we are done. 2353 // If all known call sites, which might not be all that exist, are known 2354 // to be no-recurse, we are not done but we can continue to assume 2355 // no-recurse. If one of the call sites we have not visited will become 2356 // live, another update is triggered. 2357 if (AllCallSitesKnown) 2358 indicateOptimisticFixpoint(); 2359 return ChangeStatus::UNCHANGED; 2360 } 2361 2362 // If the above check does not hold anymore we look at the calls. 2363 auto CheckForNoRecurse = [&](Instruction &I) { 2364 const auto &CB = cast<CallBase>(I); 2365 if (CB.hasFnAttr(Attribute::NoRecurse)) 2366 return true; 2367 2368 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2369 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 2370 if (!NoRecurseAA.isAssumedNoRecurse()) 2371 return false; 2372 2373 // Recursion to the same function 2374 if (CB.getCalledFunction() == getAnchorScope()) 2375 return false; 2376 2377 return true; 2378 }; 2379 2380 bool UsedAssumedInformation = false; 2381 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this, 2382 UsedAssumedInformation)) 2383 return indicatePessimisticFixpoint(); 2384 return ChangeStatus::UNCHANGED; 2385 } 2386 2387 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 2388 }; 2389 2390 /// NoRecurse attribute deduction for a call sites. 2391 struct AANoRecurseCallSite final : AANoRecurseImpl { 2392 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 2393 : AANoRecurseImpl(IRP, A) {} 2394 2395 /// See AbstractAttribute::initialize(...). 2396 void initialize(Attributor &A) override { 2397 AANoRecurseImpl::initialize(A); 2398 Function *F = getAssociatedFunction(); 2399 if (!F || F->isDeclaration()) 2400 indicatePessimisticFixpoint(); 2401 } 2402 2403 /// See AbstractAttribute::updateImpl(...). 2404 ChangeStatus updateImpl(Attributor &A) override { 2405 // TODO: Once we have call site specific value information we can provide 2406 // call site specific liveness information and then it makes 2407 // sense to specialize attributes for call sites arguments instead of 2408 // redirecting requests to the callee argument. 
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// -------------------- Undefined-Behavior Attributes ------------------------

struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef now states that volatile stores are not UB, so skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the
        // nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the
        //       value with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to null pointer where known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB or not.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
          //       We also ensure the return position is not "assumed dead"
          //       because the returned value was then potentially simplified
          //       to `undef` in AAReturnedValues without removing the
          //       `noundef` attribute yet.

          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
          //   (1) Returned value is known to be undef.
          //   (2) The value is known to be a null pointer and the returned
          //       position has the nonnull attribute (because the returned
          //       value is poison).
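          // For example (hypothetical IR), in
          //   define noundef nonnull i8* @f() { ret i8* null }
          // the null return value is poison at a nonnull position, so the
          // ret instruction below is recorded as known UB.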
          bool FoundUB = false;
          if (isa<UndefValue>(V)) {
            FoundUB = true;
          } else {
            if (isa<ConstantPointerNull>(V)) {
              auto &NonNullAA = A.getAAFor<AANonNull>(
                  *this, IRPosition::returned(*getAnchorScope()),
                  DepClassTy::NONE);
              if (NonNullAA.isKnownNonNull())
                FoundUB = true;
            }
          }

          if (FoundUB)
            for (ReturnInst *RI : RetInsts)
              KnownUBInsts.insert(RI);
          return true;
        };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
                                                    *this);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not assumed to _not_ cause UB,
    // then it is assumed to cause UB (that includes those in the KnownUBInsts
    // set). The rest of the boilerplate ensures that it is one of the
    // instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during updates in which we're processing an instruction
  // \p I that depends on a value \p V; one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle that we do not
// know to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
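// For example (illustration, not original code), `for (int i = 0; i < 128;
// ++i)` has a constant max trip count and is bounded, whereas `while (*p)
// ++p;` generally has no computable max trip count and is treated as
// unbounded.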
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm, to find all the
  // maximal SCCs. To detect whether there is a cycle, we only need to find
  // the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function which
    // might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    const auto &MemAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (!MemAA.isAssumedReadOnly())
      return false;
    if (KnownOnly && !MemAA.isKnownReadOnly())
      return false;
    if (!MemAA.isKnownReadOnly())
      A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ?
"willreturn" : "may-noreturn"; 2842 } 2843 }; 2844 2845 struct AAWillReturnFunction final : AAWillReturnImpl { 2846 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2847 : AAWillReturnImpl(IRP, A) {} 2848 2849 /// See AbstractAttribute::initialize(...). 2850 void initialize(Attributor &A) override { 2851 AAWillReturnImpl::initialize(A); 2852 2853 Function *F = getAnchorScope(); 2854 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 2855 indicatePessimisticFixpoint(); 2856 } 2857 2858 /// See AbstractAttribute::trackStatistics() 2859 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2860 }; 2861 2862 /// WillReturn attribute deduction for a call sites. 2863 struct AAWillReturnCallSite final : AAWillReturnImpl { 2864 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2865 : AAWillReturnImpl(IRP, A) {} 2866 2867 /// See AbstractAttribute::initialize(...). 2868 void initialize(Attributor &A) override { 2869 AAWillReturnImpl::initialize(A); 2870 Function *F = getAssociatedFunction(); 2871 if (!F || !A.isFunctionIPOAmendable(*F)) 2872 indicatePessimisticFixpoint(); 2873 } 2874 2875 /// See AbstractAttribute::updateImpl(...). 2876 ChangeStatus updateImpl(Attributor &A) override { 2877 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2878 return ChangeStatus::UNCHANGED; 2879 2880 // TODO: Once we have call site specific value information we can provide 2881 // call site specific liveness information and then it makes 2882 // sense to specialize attributes for call sites arguments instead of 2883 // redirecting requests to the callee argument. 2884 Function *F = getAssociatedFunction(); 2885 const IRPosition &FnPos = IRPosition::function(*F); 2886 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 2887 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2888 } 2889 2890 /// See AbstractAttribute::trackStatistics() 2891 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2892 }; 2893 2894 /// -------------------AAReachability Attribute-------------------------- 2895 2896 struct AAReachabilityImpl : AAReachability { 2897 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2898 : AAReachability(IRP, A) {} 2899 2900 const std::string getAsStr() const override { 2901 // TODO: Return the number of reachable queries. 2902 return "reachable"; 2903 } 2904 2905 /// See AbstractAttribute::updateImpl(...). 2906 ChangeStatus updateImpl(Attributor &A) override { 2907 return ChangeStatus::UNCHANGED; 2908 } 2909 }; 2910 2911 struct AAReachabilityFunction final : public AAReachabilityImpl { 2912 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2913 : AAReachabilityImpl(IRP, A) {} 2914 2915 /// See AbstractAttribute::trackStatistics() 2916 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2917 }; 2918 2919 /// ------------------------ NoAlias Argument Attribute ------------------------ 2920 2921 struct AANoAliasImpl : AANoAlias { 2922 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2923 assert(getAssociatedType()->isPointerTy() && 2924 "Noalias is a pointer attribute"); 2925 } 2926 2927 const std::string getAsStr() const override { 2928 return getAssumed() ? "noalias" : "may-alias"; 2929 } 2930 }; 2931 2932 /// NoAlias attribute for a floating value. 
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // See callsite argument attribute and callee argument attribute.
    if (hasAttr({Attribute::ByVal}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // We have to make sure no-alias on the argument does not break
    // synchronization when this is a callback argument, see also [1] below.
    // If synchronization cannot be affected, we delegate to the base
    // updateImpl function, otherwise we give up for now.

    // If the function is no-sync, no-alias cannot break synchronization.
    const auto &NoSyncAA =
        A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
                             DepClassTy::OPTIONAL);
    if (NoSyncAA.isAssumedNoSync())
      return Base::updateImpl(A);

    // If the argument is read-only, no-alias cannot break synchronization.
    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, getIRPosition(), DepClassTy::OPTIONAL);
    if (MemBehaviorAA.isAssumedReadOnly())
      return Base::updateImpl(A);

    // If the argument is never passed through callbacks, no-alias cannot
    // break synchronization.
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(
            [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
            true, AllCallSitesKnown))
      return Base::updateImpl(A);

    // TODO: add no-alias but make sure it doesn't break synchronization by
    //       introducing fake uses. See:
    // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
    //     International Workshop on OpenMP 2018,
    //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

  bool
  isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
                                         const AAMemoryBehavior &MemBehaviorAA,
                                         const AANoAlias &NoAliasAA) {
    // We can deduce "noalias" if the following conditions hold.
    // (i)   Associated value is assumed to be noalias in the definition.
    // (ii)  Associated value is assumed to be no-capture in all the uses
    //       possibly executed before this callsite.
    // (iii) There is no other pointer argument which could alias with the
    //       value.
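    // For example (hypothetical IR), %p satisfies (i)-(iii) at the call:
    //   %p = call noalias i8* @malloc(i64 8)
    //   call void @use(i8* %p)   ; %p is not captured before this call site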
3110 3111 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 3112 if (!AssociatedValueIsNoAliasAtDef) { 3113 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 3114 << " is not no-alias at the definition\n"); 3115 return false; 3116 } 3117 3118 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 3119 3120 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3121 const Function *ScopeFn = VIRP.getAnchorScope(); 3122 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); 3123 // Check whether the value is captured in the scope using AANoCapture. 3124 // Look at CFG and check only uses possibly executed before this 3125 // callsite. 3126 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 3127 Instruction *UserI = cast<Instruction>(U.getUser()); 3128 3129 // If UserI is the curr instruction and there is a single potential use of 3130 // the value in UserI we allow the use. 3131 // TODO: We should inspect the operands and allow those that cannot alias 3132 // with the value. 3133 if (UserI == getCtxI() && UserI->getNumOperands() == 1) 3134 return true; 3135 3136 if (ScopeFn) { 3137 const auto &ReachabilityAA = A.getAAFor<AAReachability>( 3138 *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL); 3139 3140 if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI())) 3141 return true; 3142 3143 if (auto *CB = dyn_cast<CallBase>(UserI)) { 3144 if (CB->isArgOperand(&U)) { 3145 3146 unsigned ArgNo = CB->getArgOperandNo(&U); 3147 3148 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 3149 *this, IRPosition::callsite_argument(*CB, ArgNo), 3150 DepClassTy::OPTIONAL); 3151 3152 if (NoCaptureAA.isAssumedNoCapture()) 3153 return true; 3154 } 3155 } 3156 } 3157 3158 // For cases which can potentially have more users 3159 if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) || 3160 isa<SelectInst>(U)) { 3161 Follow = true; 3162 return true; 3163 } 3164 3165 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n"); 3166 return false; 3167 }; 3168 3169 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 3170 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 3171 LLVM_DEBUG( 3172 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 3173 << " cannot be noalias as it is potentially captured\n"); 3174 return false; 3175 } 3176 } 3177 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 3178 3179 // Check there is no other pointer argument which could alias with the 3180 // value passed at this call site. 3181 // TODO: AbstractCallSite 3182 const auto &CB = cast<CallBase>(getAnchorValue()); 3183 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) 3184 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 3185 return false; 3186 3187 return true; 3188 } 3189 3190 /// See AbstractAttribute::updateImpl(...). 3191 ChangeStatus updateImpl(Attributor &A) override { 3192 // If the argument is readnone we are done as there are no accesses via the 3193 // argument. 
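    // (Without any accesses through the argument there is nothing an aliasing
    // pointer could conflict with.)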
3194 auto &MemBehaviorAA = 3195 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 3196 if (MemBehaviorAA.isAssumedReadNone()) { 3197 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3198 return ChangeStatus::UNCHANGED; 3199 } 3200 3201 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3202 const auto &NoAliasAA = 3203 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); 3204 3205 AAResults *AAR = nullptr; 3206 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 3207 NoAliasAA)) { 3208 LLVM_DEBUG( 3209 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 3210 return ChangeStatus::UNCHANGED; 3211 } 3212 3213 return indicatePessimisticFixpoint(); 3214 } 3215 3216 /// See AbstractAttribute::trackStatistics() 3217 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 3218 }; 3219 3220 /// NoAlias attribute for function return value. 3221 struct AANoAliasReturned final : AANoAliasImpl { 3222 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 3223 : AANoAliasImpl(IRP, A) {} 3224 3225 /// See AbstractAttribute::initialize(...). 3226 void initialize(Attributor &A) override { 3227 AANoAliasImpl::initialize(A); 3228 Function *F = getAssociatedFunction(); 3229 if (!F || F->isDeclaration()) 3230 indicatePessimisticFixpoint(); 3231 } 3232 3233 /// See AbstractAttribute::updateImpl(...). 3234 virtual ChangeStatus updateImpl(Attributor &A) override { 3235 3236 auto CheckReturnValue = [&](Value &RV) -> bool { 3237 if (Constant *C = dyn_cast<Constant>(&RV)) 3238 if (C->isNullValue() || isa<UndefValue>(C)) 3239 return true; 3240 3241 /// For now, we can only deduce noalias if we have call sites. 3242 /// FIXME: add more support. 3243 if (!isa<CallBase>(&RV)) 3244 return false; 3245 3246 const IRPosition &RVPos = IRPosition::value(RV); 3247 const auto &NoAliasAA = 3248 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); 3249 if (!NoAliasAA.isAssumedNoAlias()) 3250 return false; 3251 3252 const auto &NoCaptureAA = 3253 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); 3254 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 3255 }; 3256 3257 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 3258 return indicatePessimisticFixpoint(); 3259 3260 return ChangeStatus::UNCHANGED; 3261 } 3262 3263 /// See AbstractAttribute::trackStatistics() 3264 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 3265 }; 3266 3267 /// NoAlias attribute deduction for a call site return value. 3268 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 3269 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 3270 : AANoAliasImpl(IRP, A) {} 3271 3272 /// See AbstractAttribute::initialize(...). 3273 void initialize(Attributor &A) override { 3274 AANoAliasImpl::initialize(A); 3275 Function *F = getAssociatedFunction(); 3276 if (!F || F->isDeclaration()) 3277 indicatePessimisticFixpoint(); 3278 } 3279 3280 /// See AbstractAttribute::updateImpl(...). 3281 ChangeStatus updateImpl(Attributor &A) override { 3282 // TODO: Once we have call site specific value information we can provide 3283 // call site specific liveness information and then it makes 3284 // sense to specialize attributes for call sites arguments instead of 3285 // redirecting requests to the callee argument. 
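    // Until then, the call site return position simply inherits the noalias
    // state deduced for the callee's return position (clamped below).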
3286 Function *F = getAssociatedFunction(); 3287 const IRPosition &FnPos = IRPosition::returned(*F); 3288 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3289 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3290 } 3291 3292 /// See AbstractAttribute::trackStatistics() 3293 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3294 }; 3295 3296 /// -------------------AAIsDead Function Attribute----------------------- 3297 3298 struct AAIsDeadValueImpl : public AAIsDead { 3299 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3300 3301 /// See AAIsDead::isAssumedDead(). 3302 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3303 3304 /// See AAIsDead::isKnownDead(). 3305 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3306 3307 /// See AAIsDead::isAssumedDead(BasicBlock *). 3308 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3309 3310 /// See AAIsDead::isKnownDead(BasicBlock *). 3311 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3312 3313 /// See AAIsDead::isAssumedDead(Instruction *I). 3314 bool isAssumedDead(const Instruction *I) const override { 3315 return I == getCtxI() && isAssumedDead(); 3316 } 3317 3318 /// See AAIsDead::isKnownDead(Instruction *I). 3319 bool isKnownDead(const Instruction *I) const override { 3320 return isAssumedDead(I) && isKnownDead(); 3321 } 3322 3323 /// See AbstractAttribute::getAsStr(). 3324 const std::string getAsStr() const override { 3325 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3326 } 3327 3328 /// Check if all uses are assumed dead. 3329 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3330 // Callers might not check the type, void has no uses. 3331 if (V.getType()->isVoidTy()) 3332 return true; 3333 3334 // If we replace a value with a constant there are no uses left afterwards. 3335 if (!isa<Constant>(V)) { 3336 bool UsedAssumedInformation = false; 3337 Optional<Constant *> C = 3338 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3339 if (!C.hasValue() || *C) 3340 return true; 3341 } 3342 3343 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3344 // Explicitly set the dependence class to required because we want a long 3345 // chain of N dependent instructions to be considered live as soon as one is 3346 // without going through N update cycles. This is not required for 3347 // correctness. 3348 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3349 DepClassTy::REQUIRED); 3350 } 3351 3352 /// Determine if \p I is assumed to be side-effect free. 
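  /// Trivially dead instructions qualify. A non-intrinsic call qualifies only
  /// if it is assumed nounwind and read-only, e.g., a readonly query function
  /// whose result is ignored.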
3353   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3354     if (!I || wouldInstructionBeTriviallyDead(I))
3355       return true;
3356
3357     auto *CB = dyn_cast<CallBase>(I);
3358     if (!CB || isa<IntrinsicInst>(CB))
3359       return false;
3360
3361     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3362     const auto &NoUnwindAA =
3363         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3364     if (!NoUnwindAA.isAssumedNoUnwind())
3365       return false;
3366     if (!NoUnwindAA.isKnownNoUnwind())
3367       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3368
3369     const auto &MemBehaviorAA =
3370         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3371     if (MemBehaviorAA.isAssumedReadOnly()) {
3372       if (!MemBehaviorAA.isKnownReadOnly())
3373         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3374       return true;
3375     }
3376     return false;
3377   }
3378 };
3379
3380 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3381   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3382       : AAIsDeadValueImpl(IRP, A) {}
3383
3384   /// See AbstractAttribute::initialize(...).
3385   void initialize(Attributor &A) override {
3386     if (isa<UndefValue>(getAssociatedValue())) {
3387       indicatePessimisticFixpoint();
3388       return;
3389     }
3390
3391     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3392     if (!isAssumedSideEffectFree(A, I)) {
3393       if (!isa_and_nonnull<StoreInst>(I))
3394         indicatePessimisticFixpoint();
3395       else
3396         removeAssumedBits(HAS_NO_EFFECT);
3397     }
3398   }
3399
3400   bool isDeadStore(Attributor &A, StoreInst &SI) {
3401     // The LangRef now states that volatile stores are neither UB nor dead; skip them.
3402     if (SI.isVolatile())
3403       return false;
3404
3405     bool UsedAssumedInformation = false;
3406     SmallSetVector<Value *, 4> PotentialCopies;
3407     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3408                                              UsedAssumedInformation))
3409       return false;
3410     return llvm::all_of(PotentialCopies, [&](Value *V) {
3411       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3412                              UsedAssumedInformation);
3413     });
3414   }
3415
3416   /// See AbstractAttribute::updateImpl(...).
3417   ChangeStatus updateImpl(Attributor &A) override {
3418     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3419     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3420       if (!isDeadStore(A, *SI))
3421         return indicatePessimisticFixpoint();
3422     } else {
3423       if (!isAssumedSideEffectFree(A, I))
3424         return indicatePessimisticFixpoint();
3425       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3426         return indicatePessimisticFixpoint();
3427     }
3428     return ChangeStatus::UNCHANGED;
3429   }
3430
3431   /// See AbstractAttribute::manifest(...).
3432   ChangeStatus manifest(Attributor &A) override {
3433     Value &V = getAssociatedValue();
3434     if (auto *I = dyn_cast<Instruction>(&V)) {
3435       // If we get here we know the users are all dead. We check
3436       // isAssumedSideEffectFree again because it might no longer hold:
3437       // possibly only the users are dead while the instruction (= the call)
3438       // is still needed.
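      // For example (hypothetical): for `%x = call i32 @get()` with %x unused,
      // the call may only be deleted if @get is side-effect free; if @get
      // writes memory, the users may be dead but the call must stay.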
3439       if (isa<StoreInst>(I) ||
3440           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3441         A.deleteAfterManifest(*I);
3442         return ChangeStatus::CHANGED;
3443       }
3444     }
3445     if (V.use_empty())
3446       return ChangeStatus::UNCHANGED;
3447
3448     bool UsedAssumedInformation = false;
3449     Optional<Constant *> C =
3450         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3451     if (C.hasValue() && C.getValue())
3452       return ChangeStatus::UNCHANGED;
3453
3454     // Replace the value with undef as it is dead but keep droppable uses around
3455     // as they provide information we don't want to give up on just yet.
3456     UndefValue &UV = *UndefValue::get(V.getType());
3457     bool AnyChange =
3458         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
3459     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3460   }
3461
3462   /// See AbstractAttribute::trackStatistics()
3463   void trackStatistics() const override {
3464     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3465   }
3466 };
3467
3468 struct AAIsDeadArgument : public AAIsDeadFloating {
3469   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3470       : AAIsDeadFloating(IRP, A) {}
3471
3472   /// See AbstractAttribute::initialize(...).
3473   void initialize(Attributor &A) override {
3474     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3475       indicatePessimisticFixpoint();
3476   }
3477
3478   /// See AbstractAttribute::manifest(...).
3479   ChangeStatus manifest(Attributor &A) override {
3480     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3481     Argument &Arg = *getAssociatedArgument();
3482     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3483       if (A.registerFunctionSignatureRewrite(
3484               Arg, /* ReplacementTypes */ {},
3485               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3486               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3487         Arg.dropDroppableUses();
3488         return ChangeStatus::CHANGED;
3489       }
3490     return Changed;
3491   }
3492
3493   /// See AbstractAttribute::trackStatistics()
3494   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3495 };
3496
3497 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3498   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3499       : AAIsDeadValueImpl(IRP, A) {}
3500
3501   /// See AbstractAttribute::initialize(...).
3502   void initialize(Attributor &A) override {
3503     if (isa<UndefValue>(getAssociatedValue()))
3504       indicatePessimisticFixpoint();
3505   }
3506
3507   /// See AbstractAttribute::updateImpl(...).
3508   ChangeStatus updateImpl(Attributor &A) override {
3509     // TODO: Once we have call site specific value information we can provide
3510     // call site specific liveness information and then it makes
3511     // sense to specialize attributes for call site arguments instead of
3512     // redirecting requests to the callee argument.
3513     Argument *Arg = getAssociatedArgument();
3514     if (!Arg)
3515       return indicatePessimisticFixpoint();
3516     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3517     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3518     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3519   }
3520
3521   /// See AbstractAttribute::manifest(...).
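  /// Replaces the dead call site argument with undef, e.g., (hypothetical)
  /// `call void @f(i32 %dead)` becomes `call void @f(i32 undef)`.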
3522 ChangeStatus manifest(Attributor &A) override { 3523 CallBase &CB = cast<CallBase>(getAnchorValue()); 3524 Use &U = CB.getArgOperandUse(getCallSiteArgNo()); 3525 assert(!isa<UndefValue>(U.get()) && 3526 "Expected undef values to be filtered out!"); 3527 UndefValue &UV = *UndefValue::get(U->getType()); 3528 if (A.changeUseAfterManifest(U, UV)) 3529 return ChangeStatus::CHANGED; 3530 return ChangeStatus::UNCHANGED; 3531 } 3532 3533 /// See AbstractAttribute::trackStatistics() 3534 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 3535 }; 3536 3537 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 3538 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 3539 : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {} 3540 3541 /// See AAIsDead::isAssumedDead(). 3542 bool isAssumedDead() const override { 3543 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3544 } 3545 3546 /// See AbstractAttribute::initialize(...). 3547 void initialize(Attributor &A) override { 3548 if (isa<UndefValue>(getAssociatedValue())) { 3549 indicatePessimisticFixpoint(); 3550 return; 3551 } 3552 3553 // We track this separately as a secondary state. 3554 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3555 } 3556 3557 /// See AbstractAttribute::updateImpl(...). 3558 ChangeStatus updateImpl(Attributor &A) override { 3559 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3560 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3561 IsAssumedSideEffectFree = false; 3562 Changed = ChangeStatus::CHANGED; 3563 } 3564 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3565 return indicatePessimisticFixpoint(); 3566 return Changed; 3567 } 3568 3569 /// See AbstractAttribute::trackStatistics() 3570 void trackStatistics() const override { 3571 if (IsAssumedSideEffectFree) 3572 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3573 else 3574 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3575 } 3576 3577 /// See AbstractAttribute::getAsStr(). 3578 const std::string getAsStr() const override { 3579 return isAssumedDead() 3580 ? "assumed-dead" 3581 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3582 } 3583 3584 private: 3585 bool IsAssumedSideEffectFree; 3586 }; 3587 3588 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3589 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3590 : AAIsDeadValueImpl(IRP, A) {} 3591 3592 /// See AbstractAttribute::updateImpl(...). 3593 ChangeStatus updateImpl(Attributor &A) override { 3594 3595 bool UsedAssumedInformation = false; 3596 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3597 {Instruction::Ret}, UsedAssumedInformation); 3598 3599 auto PredForCallSite = [&](AbstractCallSite ACS) { 3600 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3601 return false; 3602 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3603 }; 3604 3605 bool AllCallSitesKnown; 3606 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3607 AllCallSitesKnown)) 3608 return indicatePessimisticFixpoint(); 3609 3610 return ChangeStatus::UNCHANGED; 3611 } 3612 3613 /// See AbstractAttribute::manifest(...). 3614 ChangeStatus manifest(Attributor &A) override { 3615 // TODO: Rewrite the signature to return void? 
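    // Until such a rewrite exists, every returned value is replaced with
    // undef, e.g., (hypothetical) `ret i32 %v` becomes `ret i32 undef` once
    // no call site uses the returned value.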
3616 bool AnyChange = false; 3617 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3618 auto RetInstPred = [&](Instruction &I) { 3619 ReturnInst &RI = cast<ReturnInst>(I); 3620 if (!isa<UndefValue>(RI.getReturnValue())) 3621 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3622 return true; 3623 }; 3624 bool UsedAssumedInformation = false; 3625 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3626 UsedAssumedInformation); 3627 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3628 } 3629 3630 /// See AbstractAttribute::trackStatistics() 3631 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3632 }; 3633 3634 struct AAIsDeadFunction : public AAIsDead { 3635 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3636 3637 /// See AbstractAttribute::initialize(...). 3638 void initialize(Attributor &A) override { 3639 const Function *F = getAnchorScope(); 3640 if (F && !F->isDeclaration()) { 3641 // We only want to compute liveness once. If the function is not part of 3642 // the SCC, skip it. 3643 if (A.isRunOn(*const_cast<Function *>(F))) { 3644 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3645 assumeLive(A, F->getEntryBlock()); 3646 } else { 3647 indicatePessimisticFixpoint(); 3648 } 3649 } 3650 } 3651 3652 /// See AbstractAttribute::getAsStr(). 3653 const std::string getAsStr() const override { 3654 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3655 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3656 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3657 std::to_string(KnownDeadEnds.size()) + "]"; 3658 } 3659 3660 /// See AbstractAttribute::manifest(...). 3661 ChangeStatus manifest(Attributor &A) override { 3662 assert(getState().isValidState() && 3663 "Attempted to manifest an invalid state!"); 3664 3665 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3666 Function &F = *getAnchorScope(); 3667 3668 if (AssumedLiveBlocks.empty()) { 3669 A.deleteAfterManifest(F); 3670 return ChangeStatus::CHANGED; 3671 } 3672 3673 // Flag to determine if we can change an invoke to a call assuming the 3674 // callee is nounwind. This is not possible if the personality of the 3675 // function allows to catch asynchronous exceptions. 3676 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3677 3678 KnownDeadEnds.set_union(ToBeExploredFrom); 3679 for (const Instruction *DeadEndI : KnownDeadEnds) { 3680 auto *CB = dyn_cast<CallBase>(DeadEndI); 3681 if (!CB) 3682 continue; 3683 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3684 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3685 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3686 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3687 continue; 3688 3689 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3690 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3691 else 3692 A.changeToUnreachableAfterManifest( 3693 const_cast<Instruction *>(DeadEndI->getNextNode())); 3694 HasChanged = ChangeStatus::CHANGED; 3695 } 3696 3697 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3698 for (BasicBlock &BB : F) 3699 if (!AssumedLiveBlocks.count(&BB)) { 3700 A.deleteAfterManifest(BB); 3701 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3702 } 3703 3704 return HasChanged; 3705 } 3706 3707 /// See AbstractAttribute::updateImpl(...). 
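  /// Defined out-of-line below; (re-)explores the function from all known
  /// live entry points and grows the sets of assumed live blocks and edges.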
3708   ChangeStatus updateImpl(Attributor &A) override;
3709
3710   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3711     return !AssumedLiveEdges.count(std::make_pair(From, To));
3712   }
3713
3714   /// See AbstractAttribute::trackStatistics()
3715   void trackStatistics() const override {}
3716
3717   /// Returns true if the function is assumed dead.
3718   bool isAssumedDead() const override { return false; }
3719
3720   /// See AAIsDead::isKnownDead().
3721   bool isKnownDead() const override { return false; }
3722
3723   /// See AAIsDead::isAssumedDead(BasicBlock *).
3724   bool isAssumedDead(const BasicBlock *BB) const override {
3725     assert(BB->getParent() == getAnchorScope() &&
3726            "BB must be in the same anchor scope function.");
3727
3728     if (!getAssumed())
3729       return false;
3730     return !AssumedLiveBlocks.count(BB);
3731   }
3732
3733   /// See AAIsDead::isKnownDead(BasicBlock *).
3734   bool isKnownDead(const BasicBlock *BB) const override {
3735     return getKnown() && isAssumedDead(BB);
3736   }
3737
3738   /// See AAIsDead::isAssumedDead(Instruction *I).
3739   bool isAssumedDead(const Instruction *I) const override {
3740     assert(I->getParent()->getParent() == getAnchorScope() &&
3741            "Instruction must be in the same anchor scope function.");
3742
3743     if (!getAssumed())
3744       return false;
3745
3746     // If it is not in AssumedLiveBlocks then it is for sure dead.
3747     // Otherwise, it can still be dead after a noreturn call in a live block.
3748     if (!AssumedLiveBlocks.count(I->getParent()))
3749       return true;
3750
3751     // If it is not after a liveness barrier it is live.
3752     const Instruction *PrevI = I->getPrevNode();
3753     while (PrevI) {
3754       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3755         return true;
3756       PrevI = PrevI->getPrevNode();
3757     }
3758     return false;
3759   }
3760
3761   /// See AAIsDead::isKnownDead(Instruction *I).
3762   bool isKnownDead(const Instruction *I) const override {
3763     return getKnown() && isAssumedDead(I);
3764   }
3765
3766   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3767   /// that internal functions called from \p BB should now be looked at.
3768   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3769     if (!AssumedLiveBlocks.insert(&BB).second)
3770       return false;
3771
3772     // We assume that all of BB is (probably) live now and if there are calls to
3773     // internal functions we will assume that those are now live as well. This
3774     // is a performance optimization for blocks with calls to a lot of internal
3775     // functions. It can however cause dead functions to be treated as live.
3776     for (const Instruction &I : BB)
3777       if (const auto *CB = dyn_cast<CallBase>(&I))
3778         if (const Function *F = CB->getCalledFunction())
3779           if (F->hasLocalLinkage())
3780             A.markLiveInternalFunction(*F);
3781     return true;
3782   }
3783
3784   /// Collection of instructions that need to be explored again, e.g., because
3785   /// we assumed they do not transfer control to (one of their) successors.
3786   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3787
3788   /// Collection of instructions that are known to not transfer control.
3789   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3790
3791   /// Collection of all assumed live edges.
3792   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3793
3794   /// Collection of all assumed live BasicBlocks.
3795 DenseSet<const BasicBlock *> AssumedLiveBlocks; 3796 }; 3797 3798 static bool 3799 identifyAliveSuccessors(Attributor &A, const CallBase &CB, 3800 AbstractAttribute &AA, 3801 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3802 const IRPosition &IPos = IRPosition::callsite_function(CB); 3803 3804 const auto &NoReturnAA = 3805 A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL); 3806 if (NoReturnAA.isAssumedNoReturn()) 3807 return !NoReturnAA.isKnownNoReturn(); 3808 if (CB.isTerminator()) 3809 AliveSuccessors.push_back(&CB.getSuccessor(0)->front()); 3810 else 3811 AliveSuccessors.push_back(CB.getNextNode()); 3812 return false; 3813 } 3814 3815 static bool 3816 identifyAliveSuccessors(Attributor &A, const InvokeInst &II, 3817 AbstractAttribute &AA, 3818 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3819 bool UsedAssumedInformation = 3820 identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors); 3821 3822 // First, determine if we can change an invoke to a call assuming the 3823 // callee is nounwind. This is not possible if the personality of the 3824 // function allows to catch asynchronous exceptions. 3825 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3826 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3827 } else { 3828 const IRPosition &IPos = IRPosition::callsite_function(II); 3829 const auto &AANoUnw = 3830 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); 3831 if (AANoUnw.isAssumedNoUnwind()) { 3832 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3833 } else { 3834 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3835 } 3836 } 3837 return UsedAssumedInformation; 3838 } 3839 3840 static bool 3841 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3842 AbstractAttribute &AA, 3843 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3844 bool UsedAssumedInformation = false; 3845 if (BI.getNumSuccessors() == 1) { 3846 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3847 } else { 3848 Optional<Constant *> C = 3849 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); 3850 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 3851 // No value yet, assume both edges are dead. 3852 } else if (isa_and_nonnull<ConstantInt>(*C)) { 3853 const BasicBlock *SuccBB = 3854 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); 3855 AliveSuccessors.push_back(&SuccBB->front()); 3856 } else { 3857 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3858 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3859 UsedAssumedInformation = false; 3860 } 3861 } 3862 return UsedAssumedInformation; 3863 } 3864 3865 static bool 3866 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3867 AbstractAttribute &AA, 3868 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3869 bool UsedAssumedInformation = false; 3870 Optional<Constant *> C = 3871 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); 3872 if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) { 3873 // No value yet, assume all edges are dead. 
3874 } else if (isa_and_nonnull<ConstantInt>(C.getValue())) { 3875 for (auto &CaseIt : SI.cases()) { 3876 if (CaseIt.getCaseValue() == C.getValue()) { 3877 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3878 return UsedAssumedInformation; 3879 } 3880 } 3881 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3882 return UsedAssumedInformation; 3883 } else { 3884 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3885 AliveSuccessors.push_back(&SuccBB->front()); 3886 } 3887 return UsedAssumedInformation; 3888 } 3889 3890 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3891 ChangeStatus Change = ChangeStatus::UNCHANGED; 3892 3893 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3894 << getAnchorScope()->size() << "] BBs and " 3895 << ToBeExploredFrom.size() << " exploration points and " 3896 << KnownDeadEnds.size() << " known dead ends\n"); 3897 3898 // Copy and clear the list of instructions we need to explore from. It is 3899 // refilled with instructions the next update has to look at. 3900 SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(), 3901 ToBeExploredFrom.end()); 3902 decltype(ToBeExploredFrom) NewToBeExploredFrom; 3903 3904 SmallVector<const Instruction *, 8> AliveSuccessors; 3905 while (!Worklist.empty()) { 3906 const Instruction *I = Worklist.pop_back_val(); 3907 LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n"); 3908 3909 // Fast forward for uninteresting instructions. We could look for UB here 3910 // though. 3911 while (!I->isTerminator() && !isa<CallBase>(I)) 3912 I = I->getNextNode(); 3913 3914 AliveSuccessors.clear(); 3915 3916 bool UsedAssumedInformation = false; 3917 switch (I->getOpcode()) { 3918 // TODO: look for (assumed) UB to backwards propagate "deadness". 
3919     default:
3920       assert(I->isTerminator() &&
3921              "Expected non-terminators to be handled already!");
3922       for (const BasicBlock *SuccBB : successors(I->getParent()))
3923         AliveSuccessors.push_back(&SuccBB->front());
3924       break;
3925     case Instruction::Call:
3926       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3927                                                        *this, AliveSuccessors);
3928       break;
3929     case Instruction::Invoke:
3930       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3931                                                        *this, AliveSuccessors);
3932       break;
3933     case Instruction::Br:
3934       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3935                                                        *this, AliveSuccessors);
3936       break;
3937     case Instruction::Switch:
3938       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3939                                                        *this, AliveSuccessors);
3940       break;
3941     }
3942
3943     if (UsedAssumedInformation) {
3944       NewToBeExploredFrom.insert(I);
3945     } else if (AliveSuccessors.empty() ||
3946                (I->isTerminator() &&
3947                 AliveSuccessors.size() < I->getNumSuccessors())) {
3948       if (KnownDeadEnds.insert(I))
3949         Change = ChangeStatus::CHANGED;
3950     }
3951
3952     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3953                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3954                       << UsedAssumedInformation << "\n");
3955
3956     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3957       if (!I->isTerminator()) {
3958         assert(AliveSuccessors.size() == 1 &&
3959                "Non-terminator expected to have a single successor!");
3960         Worklist.push_back(AliveSuccessor);
3961       } else {
3962         // Record the assumed live edge.
3963         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3964         if (AssumedLiveEdges.insert(Edge).second)
3965           Change = ChangeStatus::CHANGED;
3966         if (assumeLive(A, *AliveSuccessor->getParent()))
3967           Worklist.push_back(AliveSuccessor);
3968       }
3969     }
3970   }
3971
3972   // Check if the content of ToBeExploredFrom changed, ignoring the order.
3973   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3974       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3975         return !ToBeExploredFrom.count(I);
3976       })) {
3977     Change = ChangeStatus::CHANGED;
3978     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3979   }
3980
3981   // If we know everything is live there is no need to query for liveness.
3982   // Instead, indicating a pessimistic fixpoint will cause the state to be
3983   // "invalid" and all queries to be answered conservatively without lookups.
3984   // To be in this state we have to (1) have finished the exploration, (2) not
3985   // have ruled any unreachable code dead, and (3) have discovered no
3986   // non-trivial dead end.
3987   if (ToBeExploredFrom.empty() &&
3988       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3989       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3990         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3991       }))
3992     return indicatePessimisticFixpoint();
3993   return Change;
3994 }
3995
3996 /// Liveness information for call sites.
3997 struct AAIsDeadCallSite final : AAIsDeadFunction {
3998   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3999       : AAIsDeadFunction(IRP, A) {}
4000
4001   /// See AbstractAttribute::initialize(...).
4002   void initialize(Attributor &A) override {
4003     // TODO: Once we have call site specific value information we can provide
4004     // call site specific liveness information and then it makes
4005     // sense to specialize attributes for call sites instead of
4006     // redirecting requests to the callee.
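    // Until then this attribute must never be created for a call site
    // position; reaching the code below indicates a bug in the caller.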
4007 llvm_unreachable("Abstract attributes for liveness are not " 4008 "supported for call sites yet!"); 4009 } 4010 4011 /// See AbstractAttribute::updateImpl(...). 4012 ChangeStatus updateImpl(Attributor &A) override { 4013 return indicatePessimisticFixpoint(); 4014 } 4015 4016 /// See AbstractAttribute::trackStatistics() 4017 void trackStatistics() const override {} 4018 }; 4019 4020 /// -------------------- Dereferenceable Argument Attribute -------------------- 4021 4022 struct AADereferenceableImpl : AADereferenceable { 4023 AADereferenceableImpl(const IRPosition &IRP, Attributor &A) 4024 : AADereferenceable(IRP, A) {} 4025 using StateType = DerefState; 4026 4027 /// See AbstractAttribute::initialize(...). 4028 void initialize(Attributor &A) override { 4029 SmallVector<Attribute, 4> Attrs; 4030 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull}, 4031 Attrs, /* IgnoreSubsumingPositions */ false, &A); 4032 for (const Attribute &Attr : Attrs) 4033 takeKnownDerefBytesMaximum(Attr.getValueAsInt()); 4034 4035 const IRPosition &IRP = this->getIRPosition(); 4036 NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE); 4037 4038 bool CanBeNull, CanBeFreed; 4039 takeKnownDerefBytesMaximum( 4040 IRP.getAssociatedValue().getPointerDereferenceableBytes( 4041 A.getDataLayout(), CanBeNull, CanBeFreed)); 4042 4043 bool IsFnInterface = IRP.isFnInterfaceKind(); 4044 Function *FnScope = IRP.getAnchorScope(); 4045 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) { 4046 indicatePessimisticFixpoint(); 4047 return; 4048 } 4049 4050 if (Instruction *CtxI = getCtxI()) 4051 followUsesInMBEC(*this, A, getState(), *CtxI); 4052 } 4053 4054 /// See AbstractAttribute::getState() 4055 /// { 4056 StateType &getState() override { return *this; } 4057 const StateType &getState() const override { return *this; } 4058 /// } 4059 4060 /// Helper function for collecting accessed bytes in must-be-executed-context 4061 void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I, 4062 DerefState &State) { 4063 const Value *UseV = U->get(); 4064 if (!UseV->getType()->isPointerTy()) 4065 return; 4066 4067 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I); 4068 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) 4069 return; 4070 4071 int64_t Offset; 4072 const Value *Base = GetPointerBaseWithConstantOffset( 4073 Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true); 4074 if (Base && Base == &getAssociatedValue()) 4075 State.addAccessedBytes(Offset, Loc->Size.getValue()); 4076 } 4077 4078 /// See followUsesInMBEC 4079 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4080 AADereferenceable::StateType &State) { 4081 bool IsNonNull = false; 4082 bool TrackUse = false; 4083 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4084 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4085 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4086 << " for instruction " << *I << "\n"); 4087 4088 addAccessedBytesForUse(A, U, I, State); 4089 State.takeKnownDerefBytesMaximum(DerefBytes); 4090 return TrackUse; 4091 } 4092 4093 /// See AbstractAttribute::manifest(...). 
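  /// Also drops the weaker dereferenceable_or_null in favor of plain
  /// dereferenceable when the value is assumed non-null.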
4094   ChangeStatus manifest(Attributor &A) override {
4095     ChangeStatus Change = AADereferenceable::manifest(A);
4096     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4097       removeAttrs({Attribute::DereferenceableOrNull});
4098       return ChangeStatus::CHANGED;
4099     }
4100     return Change;
4101   }
4102
4103   void getDeducedAttributes(LLVMContext &Ctx,
4104                             SmallVectorImpl<Attribute> &Attrs) const override {
4105     // TODO: Add *_globally support
4106     if (isAssumedNonNull())
4107       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4108           Ctx, getAssumedDereferenceableBytes()));
4109     else
4110       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4111           Ctx, getAssumedDereferenceableBytes()));
4112   }
4113
4114   /// See AbstractAttribute::getAsStr().
4115   const std::string getAsStr() const override {
4116     if (!getAssumedDereferenceableBytes())
4117       return "unknown-dereferenceable";
4118     return std::string("dereferenceable") +
4119            (isAssumedNonNull() ? "" : "_or_null") +
4120            (isAssumedGlobal() ? "_globally" : "") + "<" +
4121            std::to_string(getKnownDereferenceableBytes()) + "-" +
4122            std::to_string(getAssumedDereferenceableBytes()) + ">";
4123   }
4124 };
4125
4126 /// Dereferenceable attribute for a floating value.
4127 struct AADereferenceableFloating : AADereferenceableImpl {
4128   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4129       : AADereferenceableImpl(IRP, A) {}
4130
4131   /// See AbstractAttribute::updateImpl(...).
4132   ChangeStatus updateImpl(Attributor &A) override {
4133     const DataLayout &DL = A.getDataLayout();
4134
4135     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4136                             bool Stripped) -> bool {
4137       unsigned IdxWidth =
4138           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4139       APInt Offset(IdxWidth, 0);
4140       const Value *Base =
4141           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4142
4143       const auto &AA = A.getAAFor<AADereferenceable>(
4144           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4145       int64_t DerefBytes = 0;
4146       if (!Stripped && this == &AA) {
4147         // Use IR information if we did not strip anything.
4148         // TODO: track globally.
4149         bool CanBeNull, CanBeFreed;
4150         DerefBytes =
4151             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4152         T.GlobalState.indicatePessimisticFixpoint();
4153       } else {
4154         const DerefState &DS = AA.getState();
4155         DerefBytes = DS.DerefBytesState.getAssumed();
4156         T.GlobalState &= DS.GlobalState;
4157       }
4158
4159       // For now we do not try to "increase" dereferenceability due to negative
4160       // indices as we would first need code to deal with loops and with
4161       // overflows of the dereferenceable bytes.
4162       int64_t OffsetSExt = Offset.getSExtValue();
4163       if (OffsetSExt < 0)
4164         OffsetSExt = 0;
4165
4166       T.takeAssumedDerefBytesMinimum(
4167           std::max(int64_t(0), DerefBytes - OffsetSExt));
4168
4169       if (this == &AA) {
4170         if (!Stripped) {
4171           // If nothing was stripped, IR information is all we got.
4172           T.takeKnownDerefBytesMaximum(
4173               std::max(int64_t(0), DerefBytes - OffsetSExt));
4174           T.indicatePessimisticFixpoint();
4175         } else if (OffsetSExt > 0) {
4176           // If something was stripped but there is circular reasoning we look
4177           // at the offset. If it is positive we would now decrease the
4178           // dereferenceable bytes in a circular loop, which would slowly
4179           // drive them down to the known value, a process we
4180           // can accelerate.
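          // (Hitting the pessimistic fixpoint here jumps directly to the
          // known value instead of shaving bytes off one update round at a
          // time.)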
4181 T.indicatePessimisticFixpoint(); 4182 } 4183 } 4184 4185 return T.isValidState(); 4186 }; 4187 4188 DerefState T; 4189 if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T, 4190 VisitValueCB, getCtxI())) 4191 return indicatePessimisticFixpoint(); 4192 4193 return clampStateAndIndicateChange(getState(), T); 4194 } 4195 4196 /// See AbstractAttribute::trackStatistics() 4197 void trackStatistics() const override { 4198 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable) 4199 } 4200 }; 4201 4202 /// Dereferenceable attribute for a return value. 4203 struct AADereferenceableReturned final 4204 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> { 4205 AADereferenceableReturned(const IRPosition &IRP, Attributor &A) 4206 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>( 4207 IRP, A) {} 4208 4209 /// See AbstractAttribute::trackStatistics() 4210 void trackStatistics() const override { 4211 STATS_DECLTRACK_FNRET_ATTR(dereferenceable) 4212 } 4213 }; 4214 4215 /// Dereferenceable attribute for an argument 4216 struct AADereferenceableArgument final 4217 : AAArgumentFromCallSiteArguments<AADereferenceable, 4218 AADereferenceableImpl> { 4219 using Base = 4220 AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>; 4221 AADereferenceableArgument(const IRPosition &IRP, Attributor &A) 4222 : Base(IRP, A) {} 4223 4224 /// See AbstractAttribute::trackStatistics() 4225 void trackStatistics() const override { 4226 STATS_DECLTRACK_ARG_ATTR(dereferenceable) 4227 } 4228 }; 4229 4230 /// Dereferenceable attribute for a call site argument. 4231 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating { 4232 AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A) 4233 : AADereferenceableFloating(IRP, A) {} 4234 4235 /// See AbstractAttribute::trackStatistics() 4236 void trackStatistics() const override { 4237 STATS_DECLTRACK_CSARG_ATTR(dereferenceable) 4238 } 4239 }; 4240 4241 /// Dereferenceable attribute deduction for a call site return value. 4242 struct AADereferenceableCallSiteReturned final 4243 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4244 using Base = 4245 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4246 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4247 : Base(IRP, A) {} 4248 4249 /// See AbstractAttribute::trackStatistics() 4250 void trackStatistics() const override { 4251 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4252 } 4253 }; 4254 4255 // ------------------------ Align Argument Attribute ------------------------ 4256 4257 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4258 Value &AssociatedValue, const Use *U, 4259 const Instruction *I, bool &TrackUse) { 4260 // We need to follow common pointer manipulation uses to the accesses they 4261 // feed into. 4262 if (isa<CastInst>(I)) { 4263 // Follow all but ptr2int casts. 4264 TrackUse = !isa<PtrToIntInst>(I); 4265 return 0; 4266 } 4267 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4268 if (GEP->hasAllConstantIndices()) 4269 TrackUse = true; 4270 return 0; 4271 } 4272 4273 MaybeAlign MA; 4274 if (const auto *CB = dyn_cast<CallBase>(I)) { 4275 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4276 return 0; 4277 4278 unsigned ArgNo = CB->getArgOperandNo(U); 4279 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4280 // As long as we only use known information there is no need to track 4281 // dependences here. 
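    // (Known information is monotone: it only ever grows and is never
    // retracted, so consuming it without a dependence cannot become stale in
    // a way that matters for correctness.)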
4282 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4283 MA = MaybeAlign(AlignAA.getKnownAlign()); 4284 } 4285 4286 const DataLayout &DL = A.getDataLayout(); 4287 const Value *UseV = U->get(); 4288 if (auto *SI = dyn_cast<StoreInst>(I)) { 4289 if (SI->getPointerOperand() == UseV) 4290 MA = SI->getAlign(); 4291 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4292 if (LI->getPointerOperand() == UseV) 4293 MA = LI->getAlign(); 4294 } 4295 4296 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4297 return 0; 4298 4299 unsigned Alignment = MA->value(); 4300 int64_t Offset; 4301 4302 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4303 if (Base == &AssociatedValue) { 4304 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4305 // So we can say that the maximum power of two which is a divisor of 4306 // gcd(Offset, Alignment) is an alignment. 4307 4308 uint32_t gcd = 4309 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4310 Alignment = llvm::PowerOf2Floor(gcd); 4311 } 4312 } 4313 4314 return Alignment; 4315 } 4316 4317 struct AAAlignImpl : AAAlign { 4318 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4319 4320 /// See AbstractAttribute::initialize(...). 4321 void initialize(Attributor &A) override { 4322 SmallVector<Attribute, 4> Attrs; 4323 getAttrs({Attribute::Alignment}, Attrs); 4324 for (const Attribute &Attr : Attrs) 4325 takeKnownMaximum(Attr.getValueAsInt()); 4326 4327 Value &V = getAssociatedValue(); 4328 // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int 4329 // use of the function pointer. This was caused by D73131. We want to 4330 // avoid this for function pointers especially because we iterate 4331 // their uses and int2ptr is not handled. It is not a correctness 4332 // problem though! 4333 if (!V.getType()->getPointerElementType()->isFunctionTy()) 4334 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4335 4336 if (getIRPosition().isFnInterfaceKind() && 4337 (!getAnchorScope() || 4338 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4339 indicatePessimisticFixpoint(); 4340 return; 4341 } 4342 4343 if (Instruction *CtxI = getCtxI()) 4344 followUsesInMBEC(*this, A, getState(), *CtxI); 4345 } 4346 4347 /// See AbstractAttribute::manifest(...). 4348 ChangeStatus manifest(Attributor &A) override { 4349 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 4350 4351 // Check for users that allow alignment annotations. 
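    // For example (hypothetical), `store i32 0, i32* %p, align 4` is upgraded
    // to `store i32 0, i32* %p, align 8` once %p is assumed to be 8-aligned.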
4352     Value &AssociatedValue = getAssociatedValue();
4353     for (const Use &U : AssociatedValue.uses()) {
4354       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4355         if (SI->getPointerOperand() == &AssociatedValue)
4356           if (SI->getAlignment() < getAssumedAlign()) {
4357             STATS_DECLTRACK(AAAlign, Store,
4358                             "Number of times alignment added to a store");
4359             SI->setAlignment(Align(getAssumedAlign()));
4360             LoadStoreChanged = ChangeStatus::CHANGED;
4361           }
4362       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4363         if (LI->getPointerOperand() == &AssociatedValue)
4364           if (LI->getAlignment() < getAssumedAlign()) {
4365             LI->setAlignment(Align(getAssumedAlign()));
4366             STATS_DECLTRACK(AAAlign, Load,
4367                             "Number of times alignment added to a load");
4368             LoadStoreChanged = ChangeStatus::CHANGED;
4369           }
4370       }
4371     }
4372
4373     ChangeStatus Changed = AAAlign::manifest(A);
4374
4375     Align InheritAlign =
4376         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4377     if (InheritAlign >= getAssumedAlign())
4378       return LoadStoreChanged;
4379     return Changed | LoadStoreChanged;
4380   }
4381
4382   // TODO: Provide a helper to determine the implied ABI alignment and check
4383   //       that value in the existing manifest method and a new one for
4384   //       AAAlignImpl to avoid making the alignment explicit if it did not improve.
4385
4386   /// See AbstractAttribute::getDeducedAttributes.
4387   virtual void
4388   getDeducedAttributes(LLVMContext &Ctx,
4389                        SmallVectorImpl<Attribute> &Attrs) const override {
4390     if (getAssumedAlign() > 1)
4391       Attrs.emplace_back(
4392           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4393   }
4394
4395   /// See followUsesInMBEC
4396   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4397                        AAAlign::StateType &State) {
4398     bool TrackUse = false;
4399
4400     unsigned int KnownAlign =
4401         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4402     State.takeKnownMaximum(KnownAlign);
4403
4404     return TrackUse;
4405   }
4406
4407   /// See AbstractAttribute::getAsStr().
4408   const std::string getAsStr() const override {
4409     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4410                                 "-" + std::to_string(getAssumedAlign()) + ">")
4411                              : "unknown-align";
4412   }
4413 };
4414
4415 /// Align attribute for a floating value.
4416 struct AAAlignFloating : AAAlignImpl {
4417   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4418
4419   /// See AbstractAttribute::updateImpl(...).
4420   ChangeStatus updateImpl(Attributor &A) override {
4421     const DataLayout &DL = A.getDataLayout();
4422
4423     auto VisitValueCB = [&](Value &V, const Instruction *,
4424                             AAAlign::StateType &T, bool Stripped) -> bool {
4425       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4426                                            DepClassTy::REQUIRED);
4427       if (!Stripped && this == &AA) {
4428         int64_t Offset;
4429         unsigned Alignment = 1;
4430         if (const Value *Base =
4431                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4432           Align PA = Base->getPointerAlignment(DL);
4433           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4434           // So we can say that the maximum power of two which is a divisor of
4435           // gcd(Offset, Alignment) is an alignment.
4436
4437           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4438                                                uint32_t(PA.value()));
4439           Alignment = llvm::PowerOf2Floor(gcd);
4440         } else {
4441           Alignment = V.getPointerAlignment(DL).value();
4442         }
4443         // Use only IR information if we did not strip anything.
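        // Example of the gcd rule above (hypothetical): a base aligned to 8
        // plus a constant offset of 20 gives gcd(20, 8) = 4, so the derived
        // pointer is known to be at least 4-aligned.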
4444         T.takeKnownMaximum(Alignment);
4445         T.indicatePessimisticFixpoint();
4446       } else {
4447         // Use abstract attribute information.
4448         const AAAlign::StateType &DS = AA.getState();
4449         T ^= DS;
4450       }
4451       return T.isValidState();
4452     };
4453
4454     StateType T;
4455     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4456                                           VisitValueCB, getCtxI()))
4457       return indicatePessimisticFixpoint();
4458
4459     // TODO: If we know we visited all incoming values, and thus none are
4460     //       assumed dead, we can take the known information from the state T.
4461     return clampStateAndIndicateChange(getState(), T);
4462   }
4463
4464   /// See AbstractAttribute::trackStatistics()
4465   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4466 };
4467
4468 /// Align attribute for function return value.
4469 struct AAAlignReturned final
4470     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4471   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4472   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4473
4474   /// See AbstractAttribute::initialize(...).
4475   void initialize(Attributor &A) override {
4476     Base::initialize(A);
4477     Function *F = getAssociatedFunction();
4478     if (!F || F->isDeclaration())
4479       indicatePessimisticFixpoint();
4480   }
4481
4482   /// See AbstractAttribute::trackStatistics()
4483   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4484 };
4485
4486 /// Align attribute for function argument.
4487 struct AAAlignArgument final
4488     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4489   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4490   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4491
4492   /// See AbstractAttribute::manifest(...).
4493   ChangeStatus manifest(Attributor &A) override {
4494     // If the associated argument is involved in a must-tail call we give up
4495     // because we would need to keep the argument alignments of caller and
4496     // callee in-sync. It just does not seem worth the trouble right now.
4497     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4498       return ChangeStatus::UNCHANGED;
4499     return Base::manifest(A);
4500   }
4501
4502   /// See AbstractAttribute::trackStatistics()
4503   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4504 };
4505
4506 struct AAAlignCallSiteArgument final : AAAlignFloating {
4507   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4508       : AAAlignFloating(IRP, A) {}
4509
4510   /// See AbstractAttribute::manifest(...).
4511   ChangeStatus manifest(Attributor &A) override {
4512     // If the associated argument is involved in a must-tail call we give up
4513     // because we would need to keep the argument alignments of caller and
4514     // callee in-sync. It just does not seem worth the trouble right now.
4515     if (Argument *Arg = getAssociatedArgument())
4516       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4517         return ChangeStatus::UNCHANGED;
4518     ChangeStatus Changed = AAAlignImpl::manifest(A);
4519     Align InheritAlign =
4520         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4521     if (InheritAlign >= getAssumedAlign())
4522       Changed = ChangeStatus::UNCHANGED;
4523     return Changed;
4524   }
4525
4526   /// See AbstractAttribute::updateImpl(Attributor &A).
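  /// On top of the generic floating-value update, the known alignment of the
  /// matching callee argument is imported; only known information is used, so
  /// no dependence has to be tracked.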
4527 ChangeStatus updateImpl(Attributor &A) override { 4528 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 4529 if (Argument *Arg = getAssociatedArgument()) { 4530 // We only take known information from the argument 4531 // so we do not need to track a dependence. 4532 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 4533 *this, IRPosition::argument(*Arg), DepClassTy::NONE); 4534 takeKnownMaximum(ArgAlignAA.getKnownAlign()); 4535 } 4536 return Changed; 4537 } 4538 4539 /// See AbstractAttribute::trackStatistics() 4540 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 4541 }; 4542 4543 /// Align attribute deduction for a call site return value. 4544 struct AAAlignCallSiteReturned final 4545 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 4546 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 4547 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 4548 : Base(IRP, A) {} 4549 4550 /// See AbstractAttribute::initialize(...). 4551 void initialize(Attributor &A) override { 4552 Base::initialize(A); 4553 Function *F = getAssociatedFunction(); 4554 if (!F || F->isDeclaration()) 4555 indicatePessimisticFixpoint(); 4556 } 4557 4558 /// See AbstractAttribute::trackStatistics() 4559 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 4560 }; 4561 4562 /// ------------------ Function No-Return Attribute ---------------------------- 4563 struct AANoReturnImpl : public AANoReturn { 4564 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 4565 4566 /// See AbstractAttribute::initialize(...). 4567 void initialize(Attributor &A) override { 4568 AANoReturn::initialize(A); 4569 Function *F = getAssociatedFunction(); 4570 if (!F || F->isDeclaration()) 4571 indicatePessimisticFixpoint(); 4572 } 4573 4574 /// See AbstractAttribute::getAsStr(). 4575 const std::string getAsStr() const override { 4576 return getAssumed() ? "noreturn" : "may-return"; 4577 } 4578 4579 /// See AbstractAttribute::updateImpl(Attributor &A). 4580 virtual ChangeStatus updateImpl(Attributor &A) override { 4581 auto CheckForNoReturn = [](Instruction &) { return false; }; 4582 bool UsedAssumedInformation = false; 4583 if (!A.checkForAllInstructions(CheckForNoReturn, *this, 4584 {(unsigned)Instruction::Ret}, 4585 UsedAssumedInformation)) 4586 return indicatePessimisticFixpoint(); 4587 return ChangeStatus::UNCHANGED; 4588 } 4589 }; 4590 4591 struct AANoReturnFunction final : AANoReturnImpl { 4592 AANoReturnFunction(const IRPosition &IRP, Attributor &A) 4593 : AANoReturnImpl(IRP, A) {} 4594 4595 /// See AbstractAttribute::trackStatistics() 4596 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) } 4597 }; 4598 4599 /// NoReturn attribute deduction for a call sites. 4600 struct AANoReturnCallSite final : AANoReturnImpl { 4601 AANoReturnCallSite(const IRPosition &IRP, Attributor &A) 4602 : AANoReturnImpl(IRP, A) {} 4603 4604 /// See AbstractAttribute::initialize(...). 4605 void initialize(Attributor &A) override { 4606 AANoReturnImpl::initialize(A); 4607 if (Function *F = getAssociatedFunction()) { 4608 const IRPosition &FnPos = IRPosition::function(*F); 4609 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); 4610 if (!FnAA.isAssumedNoReturn()) 4611 indicatePessimisticFixpoint(); 4612 } 4613 } 4614 4615 /// See AbstractAttribute::updateImpl(...). 
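  /// The call site state is simply clamped against the noreturn state deduced
  /// for the callee.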
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
    Function *AnchorScope = getAnchorScope();
    if (isFnInterfaceKind() &&
        (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    // You cannot "capture" null in the default address space.
    if (isa<ConstantPointerNull>(getAssociatedValue()) &&
        getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
      indicateOptimisticFixpoint();
      return;
    }

    const Function *F =
        isArgumentPosition() ? getAssociatedFunction() : AnchorScope;

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::getDeducedAttributes(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isArgumentPosition()) {
      if (isAssumedNoCapture())
        Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
      else if (ManifestInternal)
        Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
    }
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
  /// depending on the ability of the function associated with \p IRP to
  /// capture state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
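    // Illustrative example (not from the source): a function such as
    //   declare void @f(i8* %p) readonly nounwind
    // can neither write %p to memory nor communicate it back through a return
    // value or an exception, so the check below derives NO_CAPTURE right away.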
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state and the state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);

    // Check existing "returned" attributes.
    int ArgNo = IRP.getCalleeArgNo();
    if (F.doesNotThrow() && ArgNo >= 0) {
      for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
        if (F.hasParamAttribute(u, Attribute::Returned)) {
          if (u == unsigned(ArgNo))
            State.removeAssumedBits(NOT_CAPTURED_IN_RET);
          else if (F.onlyReadsMemory())
            State.addKnownBits(NO_CAPTURE);
          else
            State.addKnownBits(NOT_CAPTURED_IN_RET);
          break;
        }
    }
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
                      SmallSetVector<Value *, 4> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V may be captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA = A.getAAFor<AADereferenceable>(
        NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // For stores we check if we can follow the value through memory or not.
    if (auto *SI = dyn_cast<StoreInst>(UInst)) {
      if (SI->isVolatile())
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      bool UsedAssumedInformation = false;
      if (!AA::getPotentialCopiesOfStoredValue(
              A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
        return isCapturedIn(/* Memory */ true, /* Integer */ false,
                            /* Return */ false);
      // Not captured directly, potential copies will be checked.
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst)) {
      if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
        return isCapturedIn(/* Memory */ false, /* Integer */ false,
                            /* Return */ true);
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    auto *CB = dyn_cast<CallBase>(UInst);
    if (!CB || !CB->isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CB->getArgOperandNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA =
        A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(*CB);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason to assume no-capture, so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
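  /// A potential copy arises, e.g., when the value is passed to a call site
  /// argument that is only assumed "no-capture-maybe-returned" (see
  /// captured(...) above): the callee may return the pointer, so the call
  /// result has to be tracked as a copy too. Illustrative example (not from
  /// the source):
  ///   %copy = call i8* @identity(i8* %p)  ; uses of %copy are explored as well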
  void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness and ignore droppable users.
    bool UsedAssumedInformation = false;
    return !U->getUser()->isDroppable() &&
           !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
                            UsedAssumedInformation);
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  AANoCapture::StateType &State;

  /// Set of potential copies of the tracked value.
  SmallSetVector<Value *, 4> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};

ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
                                  : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const IRPosition &FnPos = IRPosition::function(*F);
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);

  AANoCapture::StateType T;

  // Readonly means we cannot capture through memory.
  const auto &FnMemAA =
      A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
  if (FnMemAA.isAssumedReadOnly()) {
    T.addKnownBits(NOT_CAPTURED_IN_MEM);
    if (FnMemAA.isKnownReadOnly())
      addKnownBits(NOT_CAPTURED_IN_MEM);
    else
      A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
  }

  // Make sure all returned values are different from the underlying value.
  // TODO: we could do this in a more sophisticated way inside
  //       AAReturnedValues, e.g., track all values that escape through returns
  //       directly somehow.
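  // Sketch of the intent (illustrative IR, not from the source): in
  //   define i8* @f(i8* %a, i8* %b) { ... ret i8* %a }
  // the argument %b cannot escape through the return while %a can. The
  // predicate below therefore accepts only returned values that are arguments
  // other than the one under inspection (plus at most one constant).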
  auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
    bool SeenConstant = false;
    for (auto &It : RVAA.returned_values()) {
      if (isa<Constant>(It.first)) {
        if (SeenConstant)
          return false;
        SeenConstant = true;
      } else if (!isa<Argument>(It.first) ||
                 It.first == getAssociatedArgument())
        return false;
    }
    return true;
  };

  const auto &NoUnwindAA =
      A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
  if (NoUnwindAA.isAssumedNoUnwind()) {
    bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
    if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
      T.addKnownBits(NOT_CAPTURED_IN_RET);
      if (T.isKnown(NOT_CAPTURED_IN_MEM))
        return ChangeStatus::UNCHANGED;
      if (NoUnwindAA.isKnownNoUnwind() &&
          (IsVoidTy || RVAA->getState().isAtFixpoint())) {
        addKnownBits(NOT_CAPTURED_IN_RET);
        if (isKnown(NOT_CAPTURED_IN_MEM))
          return indicateOptimisticFixpoint();
      }
    }
  }

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallSetVector<Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore =
      getDefaultMaxUsesToExploreForCaptureTracking();
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.insert(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  if (!isAssumedNoCaptureMaybeReturned())
    return indicatePessimisticFixpoint();
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (Argument *Arg = getAssociatedArgument())
      if (Arg->hasByValAttr())
        indicateOptimisticFixpoint();
    AANoCaptureImpl::initialize(A);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
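    // Until then we use the common redirection pattern (also used by other
    // call site attributes in this file): query the AA of the callee argument
    // and clamp our state against it, e.g., for `call void @f(i8* %p)` the
    // call site argument inherits the no-capture state of f's parameter.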
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoCaptureImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    // Check what state the associated function can actually capture.
    determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};
} // namespace

/// ------------------ Value Simplify Attribute ----------------------------

bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add typecast support.
  SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
      SimplifiedAssociatedValue, Other, Ty);
  if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
    return false;

  LLVM_DEBUG({
    if (SimplifiedAssociatedValue.hasValue())
      dbgs() << "[ValueSimplify] is assumed to be "
             << **SimplifiedAssociatedValue << "\n";
    else
      dbgs() << "[ValueSimplify] is assumed to be <none>\n";
  });
  return true;
}

namespace {
struct AAValueSimplifyImpl : AAValueSimplify {
  AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
      : AAValueSimplify(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getAssociatedValue().getType()->isVoidTy())
      indicatePessimisticFixpoint();
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    LLVM_DEBUG({
      errs() << "SAV: " << SimplifiedAssociatedValue << " ";
      if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
        errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
    });
    return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
                          : "not-simple";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    return SimplifiedAssociatedValue;
  }

  /// Return a value we can use as replacement for the associated one, or
  /// nullptr if we don't have one that makes sense.
  Value *getReplacementValue(Attributor &A) const {
    Value *NewV = SimplifiedAssociatedValue.hasValue()
                      ? SimplifiedAssociatedValue.getValue()
                      : UndefValue::get(getAssociatedType());
    if (!NewV)
      return nullptr;
    NewV = AA::getWithType(*NewV, *getAssociatedType());
    if (!NewV || NewV == &getAssociatedValue())
      return nullptr;
    const Instruction *CtxI = getCtxI();
    if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
      return nullptr;
    if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
      return nullptr;
    return NewV;
  }

  /// Helper function for querying AAValueSimplify and updating the candidate.
  /// \param IRP The value position we are trying to unify with SimplifiedValue
  bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
                      const IRPosition &IRP, bool Simplify = true) {
    bool UsedAssumedInformation = false;
    Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
    if (Simplify)
      QueryingValueSimplified =
          A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
    return unionAssumed(QueryingValueSimplified);
  }

  /// Return whether a candidate was found or not.
  template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
    if (!getAssociatedValue().getType()->isIntegerTy())
      return false;

    // This will also pass the call base context.
    const auto &AA =
        A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);

    Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);

    if (!COpt.hasValue()) {
      SimplifiedAssociatedValue = llvm::None;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    if (auto *C = COpt.getValue()) {
      SimplifiedAssociatedValue = C;
      A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }

  bool askSimplifiedValueForOtherAAs(Attributor &A) {
    if (askSimplifiedValueFor<AAValueConstantRange>(A))
      return true;
    if (askSimplifiedValueFor<AAPotentialValues>(A))
      return true;
    return false;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (getAssociatedValue().user_empty())
      return Changed;

    if (auto *NewV = getReplacementValue(A)) {
      LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
                        << *NewV << " :: " << *this << "\n");
      if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    SimplifiedAssociatedValue = &getAssociatedValue();
    return AAValueSimplify::indicatePessimisticFixpoint();
  }

  static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
                         LoadInst &L, function_ref<bool(Value &)> Union) {
    auto UnionWrapper = [&](Value &V, Value &Obj) {
      if (isa<AllocaInst>(Obj))
        return Union(V);
      if (!AA::isDynamicallyUnique(A, AA, V))
        return false;
      if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
        return false;
      return Union(V);
    };

    Value &Ptr = *L.getPointerOperand();
    SmallVector<Value *, 8> Objects;
    if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
      return false;

    const auto *TLI =
        A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction());
    for (Value *Obj : Objects) {
      LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
      if (isa<UndefValue>(Obj))
        continue;
      if (isa<ConstantPointerNull>(Obj)) {
        // A null pointer access can be undefined but any offset from null may
        // be OK. We do not try to optimize the latter.
        bool UsedAssumedInformation = false;
        if (!NullPointerIsDefined(L.getFunction(),
                                  Ptr.getType()->getPointerAddressSpace()) &&
            A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
          continue;
        return false;
      }
      Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI);
      if (!InitialVal || !Union(*InitialVal))
        return false;

      LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
                           "propagation, checking accesses next.\n");

      auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
        LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
        if (!Acc.isWrite())
          return true;
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        Value *Content = Acc.getWrittenValue();
        if (!Content)
          return false;
        Value *CastedContent =
            AA::getWithType(*Content, *AA.getAssociatedType());
        if (!CastedContent)
          return false;
        if (IsExact)
          return UnionWrapper(*CastedContent, *Obj);
        if (auto *C = dyn_cast<Constant>(CastedContent))
          if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
            return UnionWrapper(*CastedContent, *Obj);
        return false;
      };

      auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
                                           DepClassTy::REQUIRED);
      if (!PI.forallInterferingAccesses(L, CheckAccess))
        return false;
    }
    return true;
  }
};

struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
  AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAnchorScope() || getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
                 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
                /* IgnoreSubsumingPositions */ true))
      indicatePessimisticFixpoint();

    // FIXME: This is a hack to prevent us from propagating function pointers
    // in the new pass manager CGSCC pass as it creates call edges the
    // CallGraphUpdater cannot handle yet.
    Value &V = getAssociatedValue();
    if (V.getType()->isPointerTy() &&
        V.getType()->getPointerElementType()->isFunctionTy() &&
        !A.isModulePass())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
    Argument *Arg = getAssociatedArgument();
    if (Arg->hasByValAttr()) {
      // TODO: We probably need to verify synchronization is not an issue,
      //       e.g., there is no race by not copying a constant byval.
      const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
                                                       DepClassTy::REQUIRED);
      if (!MemAA.isAssumedReadOnly())
        return indicatePessimisticFixpoint();
    }

    auto Before = SimplifiedAssociatedValue;

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      const IRPosition &ACSArgPos =
          IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified values
      // in other functions, e.g., we don't want to say an argument in a
      // static function is actually an argument in a different function.
      bool UsedAssumedInformation = false;
      Optional<Constant *> SimpleArgOp =
          A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
      if (!SimpleArgOp.hasValue())
        return true;
      if (!SimpleArgOp.getValue())
        return false;
      if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
        return false;
      return unionAssumed(*SimpleArgOp);
    };

    // Generate an answer specific to a call site context.
    bool Success;
    bool AllCallSitesKnown;
    if (hasCallBaseContext() &&
        getCallBaseContext()->getCalledFunction() == Arg->getParent())
      Success = PredForCallSite(
          AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
    else
      Success = A.checkForAllCallSites(PredForCallSite, *this, true,
                                       AllCallSitesKnown);

    if (!Success)
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_simplify)
  }
};

struct AAValueSimplifyReturned : AAValueSimplifyImpl {
  AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AAValueSimplify::getAssumedSimplifiedValue()
  Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
    if (!isValidState())
      return nullptr;
    return SimplifiedAssociatedValue;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto PredForReturned = [&](Value &V) {
      return checkAndUpdate(A, *this,
                            IRPosition::value(V, getCallBaseContext()));
    };

    if (!A.checkForAllReturnedValues(PredForReturned, *this))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (auto *NewV = getReplacementValue(A)) {
      auto PredForReturned =
          [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
            for (ReturnInst *RI : RetInsts) {
              Value *ReturnedVal = RI->getReturnValue();
              if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
                return true;
              assert(RI->getFunction() == getAnchorScope() &&
                     "ReturnInst in wrong function!");
              LLVM_DEBUG(dbgs()
                         << "[ValueSimplify] " << *ReturnedVal << " -> "
                         << *NewV << " in " << *RI << " :: " << *this << "\n");
              if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
                Changed = ChangeStatus::CHANGED;
            }
            return true;
          };
      A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFloating : AAValueSimplifyImpl {
  AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    Value &V = getAnchorValue();

    // TODO: add other cases.
    if (isa<Constant>(V))
      indicatePessimisticFixpoint();
  }

  /// Check if \p Cmp is a comparison we can simplify.
  ///
  /// We handle multiple cases, one in which at least one operand is an
  /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
  /// operand. Return true if successful, in that case SimplifiedAssociatedValue
  /// will be updated.
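  ///
  /// Illustrative example (not from the source): given
  ///   %c = icmp eq i8* %p, null
  /// where AANonNull assumes %p to be non-null, %c simplifies to `false`
  /// (and to `true` for `icmp ne`). The trivial `%x == %x` style case is
  /// handled first and needs no non-null reasoning at all.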
  bool handleCmp(Attributor &A, CmpInst &Cmp) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, V.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };

    Value *LHS = Cmp.getOperand(0);
    Value *RHS = Cmp.getOperand(1);

    // Simplify the operands first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS =
        A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedLHS.hasValue())
      return true;
    if (!SimplifiedLHS.getValue())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS =
        A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
                               *this, UsedAssumedInformation);
    if (!SimplifiedRHS.hasValue())
      return true;
    if (!SimplifiedRHS.getValue())
      return false;
    RHS = *SimplifiedRHS;

    LLVMContext &Ctx = Cmp.getContext();
    // Handle the trivial case first in which we don't even need to think about
    // null or non-null.
    if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
      Constant *NewVal =
          ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
      if (!Union(*NewVal))
        return false;
      if (!UsedAssumedInformation)
        indicateOptimisticFixpoint();
      return true;
    }

    // From now on we only handle equalities (==, !=).
    ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
    if (!ICmp || !ICmp->isEquality())
      return false;

    bool LHSIsNull = isa<ConstantPointerNull>(LHS);
    bool RHSIsNull = isa<ConstantPointerNull>(RHS);
    if (!LHSIsNull && !RHSIsNull)
      return false;

    // We are left with the nullptr ==/!= non-nullptr case. We'll use AANonNull
    // on the non-nullptr operand and if we assume it's non-null we can
    // conclude the result of the comparison.
    assert((LHSIsNull || RHSIsNull) &&
           "Expected nullptr versus non-nullptr comparison at this point");

    // The index is the operand that we assume is not null.
    unsigned PtrIdx = LHSIsNull;
    auto &PtrNonNullAA = A.getAAFor<AANonNull>(
        *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
        DepClassTy::REQUIRED);
    if (!PtrNonNullAA.isAssumedNonNull())
      return false;
    UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();

    // The new value depends on the predicate, true for != and false for ==.
    Constant *NewVal = ConstantInt::get(
        Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
    if (!Union(*NewVal))
      return false;

    if (!UsedAssumedInformation)
      indicateOptimisticFixpoint();

    return true;
  }

  bool updateWithLoad(Attributor &A, LoadInst &L) {
    auto Union = [&](Value &V) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, &V, L.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    };
    return handleLoad(A, *this, L, Union);
  }

  /// Use the generic, non-optimistic InstSimplify functionality if we managed
  /// to simplify any operand of the instruction \p I. Return true if
  /// successful, in that case SimplifiedAssociatedValue will be updated.
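  ///
  /// Sketch of the idea (illustrative): if the Attributor already simplified
  /// %x to the constant 3 in
  ///   %y = add i32 %x, 4
  /// the code below re-queries InstSimplify with the simplified operand list
  /// {3, 4} and, if it folds, propagates the result (here 7) into
  /// SimplifiedAssociatedValue.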
  bool handleGenericInst(Attributor &A, Instruction &I) {
    bool SomeSimplified = false;
    bool UsedAssumedInformation = false;

    SmallVector<Value *, 8> NewOps(I.getNumOperands());
    int Idx = 0;
    for (Value *Op : I.operands()) {
      const auto &SimplifiedOp =
          A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
                                 *this, UsedAssumedInformation);
      // If we are not sure about any operand we are not sure about the entire
      // instruction, so we'll wait.
      if (!SimplifiedOp.hasValue())
        return true;

      if (SimplifiedOp.getValue())
        NewOps[Idx] = SimplifiedOp.getValue();
      else
        NewOps[Idx] = Op;

      SomeSimplified |= (NewOps[Idx] != Op);
      ++Idx;
    }

    // We won't bother with the InstSimplify interface if we didn't simplify
    // any operand ourselves.
    if (!SomeSimplified)
      return false;

    InformationCache &InfoCache = A.getInfoCache();
    Function *F = I.getFunction();
    const auto *DT =
        InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    OptimizationRemarkEmitter *ORE = nullptr;

    const DataLayout &DL = I.getModule()->getDataLayout();
    SimplifyQuery Q(DL, TLI, DT, AC, &I);
    if (Value *SimplifiedI =
            SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
      SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
          SimplifiedAssociatedValue, SimplifiedI, I.getType());
      return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
    }
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
                            bool Stripped) -> bool {
      auto &AA = A.getAAFor<AAValueSimplify>(
          *this, IRPosition::value(V, getCallBaseContext()),
          DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {

        if (auto *I = dyn_cast<Instruction>(&V)) {
          if (auto *LI = dyn_cast<LoadInst>(&V))
            if (updateWithLoad(A, *LI))
              return true;
          if (auto *Cmp = dyn_cast<CmpInst>(&V))
            if (handleCmp(A, *Cmp))
              return true;
          if (handleGenericInst(A, *I))
            return true;
        }
        // TODO: Look at the instruction and check recursively.

        LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
                          << "\n");
        return false;
      }
      return checkAndUpdate(A, *this,
                            IRPosition::value(V, getCallBaseContext()));
    };

    bool Dummy = false;
    if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
                                     VisitValueCB, getCtxI(),
                                     /* UseValueSimplify */ false))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();

    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
  }
};

struct AAValueSimplifyFunction : AAValueSimplifyImpl {
  AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SimplifiedAssociatedValue = nullptr;
    indicateOptimisticFixpoint();
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
  AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFunction(IRP, A) {}
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
  AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyImpl(IRP, A) {}

  void initialize(Attributor &A) override {
    AAValueSimplifyImpl::initialize(A);
    if (!getAssociatedFunction())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Before = SimplifiedAssociatedValue;
    auto &RetAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*getAssociatedFunction()),
        DepClassTy::REQUIRED);
    auto PredForReturned =
        [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          bool UsedAssumedInformation = false;
          Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
              &RetVal, *cast<CallBase>(getCtxI()), *this,
              UsedAssumedInformation);
          SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
              SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
          return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
        };
    if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
      if (!askSimplifiedValueForOtherAAs(A))
        return indicatePessimisticFixpoint();
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_simplify)
  }
};

struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (auto *NewV = getReplacementValue(A)) {
      Use &U = cast<CallBase>(&getAnchorValue())
                   ->getArgOperandUse(getCallSiteArgNo());
      if (A.changeUseAfterManifest(U, *NewV))
        Changed = ChangeStatus::CHANGED;
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};

/// ----------------------- Heap-To-Stack Conversion ---------------------------
struct AAHeapToStackFunction final : public AAHeapToStack {

  struct AllocationInfo {
    /// The call that allocates the memory.
    CallBase *const CB;

    /// The library function id for the allocation.
    LibFunc LibraryFunctionId = NotLibFunc;

    /// The status wrt. a rewrite.
    enum {
      STACK_DUE_TO_USE,
      STACK_DUE_TO_FREE,
      INVALID,
    } Status = STACK_DUE_TO_USE;

    /// Flag to indicate if we encountered a use that might free this
    /// allocation but which is not in the deallocation infos.
    bool HasPotentiallyFreeingUnknownUses = false;

    /// The set of free calls that use this allocation.
    SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
  };

  struct DeallocationInfo {
    /// The call that deallocates the memory.
    CallBase *const CB;

    /// Flag to indicate if we don't know all objects this deallocation might
    /// free.
    bool MightFreeUnknownObjects = false;

    /// The set of allocation calls that are potentially freed.
    SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
  };

  AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
      : AAHeapToStack(IRP, A) {}

  ~AAHeapToStackFunction() {
    // Ensure we call the destructors so we release any memory allocated in
    // the sets.
    for (auto &It : AllocationInfos)
      It.getSecond()->~AllocationInfo();
    for (auto &It : DeallocationInfos)
      It.getSecond()->~DeallocationInfo();
  }

  void initialize(Attributor &A) override {
    AAHeapToStack::initialize(A);

    const Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

    auto AllocationIdentifierCB = [&](Instruction &I) {
      CallBase *CB = dyn_cast<CallBase>(&I);
      if (!CB)
        return true;
      if (isFreeCall(CB, TLI)) {
        DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
        return true;
      }
      // To do heap to stack, we need to know that the allocation itself is
      // removable once uses are rewritten, and that we can initialize the
      // alloca to the same pattern as the original allocation result.
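      // End-to-end effect of this attribute (illustrative IR, not from the
      // source): a removable allocation such as
      //   %p = call i8* @malloc(i64 16)
      //   ...
      //   call void @free(i8* %p)
      // is rewritten by manifest(...) below into
      //   %p = alloca i8, i64 16
      // and the matching free call is deleted.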
      if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
        auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
        if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
          AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
          AllocationInfos[CB] = AI;
          TLI->getLibFunc(*CB, AI->LibraryFunctionId);
        }
      }
      return true;
    };

    bool UsedAssumedInformation = false;
    bool Success = A.checkForAllCallLikeInstructions(
        AllocationIdentifierCB, *this, UsedAssumedInformation,
        /* CheckBBLivenessOnly */ false,
        /* CheckPotentiallyDead */ true);
    (void)Success;
    assert(Success && "Did not expect the call base visit callback to fail!");
  }

  const std::string getAsStr() const override {
    unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
    for (const auto &It : AllocationInfos) {
      if (It.second->Status == AllocationInfo::INVALID)
        ++NumInvalidMallocs;
      else
        ++NumH2SMallocs;
    }
    return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
           std::to_string(NumInvalidMallocs);
  }

  /// See AbstractAttribute::trackStatistics().
  void trackStatistics() const override {
    STATS_DECL(
        MallocCalls, Function,
        "Number of malloc/calloc/aligned_alloc calls converted to allocas");
    for (auto &It : AllocationInfos)
      if (It.second->Status != AllocationInfo::INVALID)
        ++BUILD_STAT_NAME(MallocCalls, Function);
  }

  bool isAssumedHeapToStack(const CallBase &CB) const override {
    if (isValidState())
      if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
        return AI->Status != AllocationInfo::INVALID;
    return false;
  }

  bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
    if (!isValidState())
      return false;

    for (auto &It : AllocationInfos) {
      AllocationInfo &AI = *It.second;
      if (AI.Status == AllocationInfo::INVALID)
        continue;

      if (AI.PotentialFreeCalls.count(&CB))
        return true;
    }

    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

    for (auto &It : AllocationInfos) {
      AllocationInfo &AI = *It.second;
      if (AI.Status == AllocationInfo::INVALID)
        continue;

      for (CallBase *FreeCall : AI.PotentialFreeCalls) {
        LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
        A.deleteAfterManifest(*FreeCall);
        HasChanged = ChangeStatus::CHANGED;
      }

      LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
                        << "\n");

      auto Remark = [&](OptimizationRemark OR) {
        LibFunc IsAllocShared;
        if (TLI->getLibFunc(*AI.CB, IsAllocShared))
          if (IsAllocShared == LibFunc___kmpc_alloc_shared)
            return OR << "Moving globalized variable to the stack.";
        return OR << "Moving memory allocation from the heap to the stack.";
      };
      if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
        A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
      else
        A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);

      Value *Size;
      Optional<APInt> SizeAPI = getSize(A, *this, AI);
      if (SizeAPI.hasValue()) {
        Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
      } else {
        LLVMContext &Ctx = AI.CB->getContext();
        auto &DL = A.getInfoCache().getDL();
        ObjectSizeOpts Opts;
        ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
        SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
        assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
               cast<ConstantInt>(SizeOffsetPair.second)->isZero());
        Size = SizeOffsetPair.first;
      }

      Align Alignment(1);
      if (MaybeAlign RetAlign = AI.CB->getRetAlign())
        Alignment = max(Alignment, RetAlign);
      if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
        Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
        assert(AlignmentAPI.hasValue() &&
               "Expected an alignment during manifest!");
        Alignment =
            max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
      }

      unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
      Instruction *Alloca =
          new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
                         "", AI.CB->getNextNode());

      if (Alloca->getType() != AI.CB->getType())
        Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
                                 Alloca->getNextNode());

      auto *I8Ty = Type::getInt8Ty(F->getContext());
      auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
      assert(InitVal &&
             "Must be able to materialize initial memory state of allocation");

      A.changeValueAfterManifest(*AI.CB, *Alloca);

      if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
        auto *NBB = II->getNormalDest();
        BranchInst::Create(NBB, AI.CB->getParent());
        A.deleteAfterManifest(*AI.CB);
      } else {
        A.deleteAfterManifest(*AI.CB);
      }

      // Initialize the alloca with the same value as used by the allocation
      // function. We can skip undef as the initial value of an alloca is
      // undef, and the memset would simply end up being DSEd.
      if (!isa<UndefValue>(InitVal)) {
        IRBuilder<> Builder(Alloca->getNextNode());
        // TODO: Use alignment above if align!=1
        Builder.CreateMemSet(Alloca, InitVal, Size, None);
      }
      HasChanged = ChangeStatus::CHANGED;
    }

    return HasChanged;
  }

  Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
                           Value &V) {
    bool UsedAssumedInformation = false;
    Optional<Constant *> SimpleV =
        A.getAssumedConstant(V, AA, UsedAssumedInformation);
    if (!SimpleV.hasValue())
      return APInt(64, 0);
    if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
      return CI->getValue();
    return llvm::None;
  }

  Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
                          AllocationInfo &AI) {
    auto Mapper = [&](const Value *V) -> const Value * {
      bool UsedAssumedInformation = false;
      if (Optional<Constant *> SimpleV =
              A.getAssumedConstant(*V, AA, UsedAssumedInformation))
        if (*SimpleV)
          return *SimpleV;
      return V;
    };

    const Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
    return getAllocSize(AI.CB, TLI, Mapper);
  }

  /// Collection of all malloc-like calls in a function with associated
  /// information.
  DenseMap<CallBase *, AllocationInfo *> AllocationInfos;

  /// Collection of all free-like calls in a function with associated
  /// information.
  DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;

  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;
  const Function *F = getAnchorScope();
  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

  const auto &LivenessAA =
      A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  bool StackIsAccessibleByOtherThreads =
      A.getInfoCache().stackIsAccessibleByOtherThreads();

  // Flag to ensure we update our deallocation information at most once per
  // updateImpl call and only if we use the free check reasoning.
  bool HasUpdatedFrees = false;

  auto UpdateFrees = [&]() {
    HasUpdatedFrees = true;

    for (auto &It : DeallocationInfos) {
      DeallocationInfo &DI = *It.second;
      // For now we cannot use deallocations that have unknown inputs, skip
      // them.
      if (DI.MightFreeUnknownObjects)
        continue;

      // No need to analyze dead calls, ignore them instead.
      bool UsedAssumedInformation = false;
      if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
                          /* CheckBBLivenessOnly */ true))
        continue;

      // Use the optimistic version to get the freed objects, ignoring dead
      // branches etc.
      SmallVector<Value *, 8> Objects;
      if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
                                           *this, DI.CB)) {
        LLVM_DEBUG(
            dbgs()
            << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
        DI.MightFreeUnknownObjects = true;
        continue;
      }

      // Check each object explicitly.
      for (auto *Obj : Objects) {
        // Free of null and undef can be ignored as no-ops (or UB in the latter
        // case).
        if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
          continue;

        CallBase *ObjCB = dyn_cast<CallBase>(Obj);
        if (!ObjCB) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] Free of a non-call object: " << *Obj << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
        if (!AI) {
          LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
                            << "\n");
          DI.MightFreeUnknownObjects = true;
          continue;
        }

        DI.PotentialAllocationCalls.insert(ObjCB);
      }
    }
  };

  auto FreeCheck = [&](AllocationInfo &AI) {
    // If the stack is not accessible by other threads, the "must-free" logic
    // doesn't apply as the pointer could be shared and needs to be placed in
    // "shareable" memory.
    if (!StackIsAccessibleByOtherThreads) {
      auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
      if (!NoSyncAA.isAssumedNoSync()) {
        LLVM_DEBUG(
            dbgs() << "[H2S] found an escaping use, stack is not accessible by "
                      "other threads and function is not nosync:\n");
        return false;
      }
    }
    if (!HasUpdatedFrees)
      UpdateFrees();

    // TODO: Allow multi exit functions that have different free calls.
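    // The checks below implement the "must-free" reasoning: there must be
    // exactly one known free call for this allocation, that free must be
    // known to free only this allocation, and it must be executed whenever
    // the allocation is (verified via the MustBeExecutedContextExplorer).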
    if (AI.PotentialFreeCalls.size() != 1) {
      LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
                        << AI.PotentialFreeCalls.size() << "\n");
      return false;
    }
    CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
    DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
    if (!DI) {
      LLVM_DEBUG(
          dbgs() << "[H2S] unique free call was not known as deallocation call "
                 << *UniqueFree << "\n");
      return false;
    }
    if (DI->MightFreeUnknownObjects) {
      LLVM_DEBUG(
          dbgs() << "[H2S] unique free call might free unknown allocations\n");
      return false;
    }
    if (DI->PotentialAllocationCalls.size() > 1) {
      LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
                        << DI->PotentialAllocationCalls.size()
                        << " different allocations\n");
      return false;
    }
    if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
      LLVM_DEBUG(
          dbgs()
          << "[H2S] unique free call not known to free this allocation but "
          << **DI->PotentialAllocationCalls.begin() << "\n");
      return false;
    }
    Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
    if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
      LLVM_DEBUG(
          dbgs()
          << "[H2S] unique free call might not be executed with the allocation "
          << *UniqueFree << "\n");
      return false;
    }
    return true;
  };

  auto UsesCheck = [&](AllocationInfo &AI) {
    bool ValidUsesOnly = true;

    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (isa<LoadInst>(UserI))
        return true;
      if (auto *SI = dyn_cast<StoreInst>(UserI)) {
        if (SI->getValueOperand() == U.get()) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] escaping store to memory: " << *UserI << "\n");
          ValidUsesOnly = false;
        } else {
          // A store into the malloc'ed memory is fine.
        }
        return true;
      }
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
          return true;
        if (DeallocationInfos.count(CB)) {
          AI.PotentialFreeCalls.insert(CB);
          return true;
        }

        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::OPTIONAL);

        // If a call site argument use is nofree, we are fine.
        const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::OPTIONAL);

        bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
        bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
        if (MaybeCaptured ||
            (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
             MaybeFreed)) {
          AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;

          // Emit a missed remark if this is missed OpenMP globalization.
          auto Remark = [&](OptimizationRemarkMissed ORM) {
            return ORM
                   << "Could not move globalized variable to the stack. "
                      "Variable is potentially captured in call. Mark "
Mark "
                      "parameter as `__attribute__((noescape))` to override.";
          };

          if (ValidUsesOnly &&
              AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
            A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);

          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    if (!A.checkForAllUses(Pred, *this, *AI.CB))
      return false;
    return ValidUsesOnly;
  };

  // The actual update starts here. We look at all allocations and depending on
  // their status perform the appropriate check(s).
  for (auto &It : AllocationInfos) {
    AllocationInfo &AI = *It.second;
    if (AI.Status == AllocationInfo::INVALID)
      continue;

    if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
      if (!getAPInt(A, *this, *Align)) {
        // Can't generate an alloca which respects the required alignment
        // on the allocation.
        LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
                          << "\n");
        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
    }

    if (MaxHeapToStackSize != -1) {
      Optional<APInt> Size = getSize(A, *this, AI);
      if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
        LLVM_DEBUG({
          if (!Size.hasValue())
            dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
          else
            dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
                   << MaxHeapToStackSize << "\n";
        });

        AI.Status = AllocationInfo::INVALID;
        Changed = ChangeStatus::CHANGED;
        continue;
      }
    }

    switch (AI.Status) {
    case AllocationInfo::STACK_DUE_TO_USE:
      if (UsesCheck(AI))
        continue;
      AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
      LLVM_FALLTHROUGH;
    case AllocationInfo::STACK_DUE_TO_FREE:
      if (FreeCheck(AI))
        continue;
      AI.Status = AllocationInfo::INVALID;
      Changed = ChangeStatus::CHANGED;
      continue;
    case AllocationInfo::INVALID:
      llvm_unreachable("Invalid allocations should never reach this point!");
    };
  }

  return Changed;
}

/// ----------------------- Privatizable Pointers ------------------------------
struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
  AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}

  ChangeStatus indicatePessimisticFixpoint() override {
    AAPrivatizablePtr::indicatePessimisticFixpoint();
    PrivatizableType = nullptr;
    return ChangeStatus::CHANGED;
  }

  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet, nullptr means there is none.
  virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;

  /// Return a privatizable type that encloses both T0 and T1.
  /// TODO: This is merely a stub for now as we should manage a mapping as well.
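  /// For example (reading off the stub below): combining <none> with T yields
  /// T, two equal types combine to themselves, and two distinct concrete
  /// types combine to nullptr, i.e., there is no common privatizable type.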
  Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
    if (!T0.hasValue())
      return T1;
    if (!T1.hasValue())
      return T0;
    if (T0 == T1)
      return T0;
    return nullptr;
  }

  Optional<Type *> getPrivatizableType() const override {
    return PrivatizableType;
  }

  const std::string getAsStr() const override {
    return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
  }

protected:
  Optional<Type *> PrivatizableType;
};

// TODO: Do this for call site arguments (probably also other values) as well.

struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    // If this is a byval argument and we know all the call sites (so we can
    // rewrite them), there is no need to check them explicitly.
    bool AllCallSitesKnown;
    if (getIRPosition().hasAttr(Attribute::ByVal) &&
        A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
                               true, AllCallSitesKnown))
      return getAssociatedValue().getType()->getPointerElementType();

    Optional<Type *> Ty;
    unsigned ArgNo = getIRPosition().getCallSiteArgNo();

    // Make sure the associated call site argument has the same type at all
    // call sites and it is an allocation we know is safe to privatize, for now
    // that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow up patch.
    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Check that all call sites agree on a type.
      auto &PrivCSArgAA =
          A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
      Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
        if (CSTy.hasValue() && CSTy.getValue())
          CSTy.getValue()->print(dbgs());
        else if (CSTy.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
      });

      Ty = combineTypes(Ty, CSTy);

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
        if (Ty.hasValue() && Ty.getValue())
          Ty.getValue()->print(dbgs());
        else if (Ty.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
        dbgs() << "\n";
      });

      return !Ty.hasValue() || Ty.getValue();
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
      return nullptr;
    return Ty;
  }

  /// See AbstractAttribute::updateImpl(...).
6389 ChangeStatus updateImpl(Attributor &A) override { 6390 PrivatizableType = identifyPrivatizableType(A); 6391 if (!PrivatizableType.hasValue()) 6392 return ChangeStatus::UNCHANGED; 6393 if (!PrivatizableType.getValue()) 6394 return indicatePessimisticFixpoint(); 6395 6396 // The dependence is optional so we don't give up once we give up on the 6397 // alignment. 6398 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()), 6399 DepClassTy::OPTIONAL); 6400 6401 // Avoid arguments with padding for now. 6402 if (!getIRPosition().hasAttr(Attribute::ByVal) && 6403 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(), 6404 A.getInfoCache().getDL())) { 6405 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n"); 6406 return indicatePessimisticFixpoint(); 6407 } 6408 6409 // Collect the types that will replace the privatizable type in the function 6410 // signature. 6411 SmallVector<Type *, 16> ReplacementTypes; 6412 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6413 6414 // Verify callee and caller agree on how the promoted argument would be 6415 // passed. 6416 Function &Fn = *getIRPosition().getAnchorScope(); 6417 const auto *TTI = 6418 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn); 6419 if (!TTI) { 6420 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function " 6421 << Fn.getName() << "\n"); 6422 return indicatePessimisticFixpoint(); 6423 } 6424 6425 auto CallSiteCheck = [&](AbstractCallSite ACS) { 6426 CallBase *CB = ACS.getInstruction(); 6427 return TTI->areTypesABICompatible( 6428 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes); 6429 }; 6430 bool AllCallSitesKnown; 6431 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, 6432 AllCallSitesKnown)) { 6433 LLVM_DEBUG( 6434 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for " 6435 << Fn.getName() << "\n"); 6436 return indicatePessimisticFixpoint(); 6437 } 6438 6439 // Register a rewrite of the argument. 6440 Argument *Arg = getAssociatedArgument(); 6441 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) { 6442 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n"); 6443 return indicatePessimisticFixpoint(); 6444 } 6445 6446 unsigned ArgNo = Arg->getArgNo(); 6447 6448 // Helper to check if for the given call site the associated argument is 6449 // passed to a callback where the privatization would be different. 
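    // E.g. (a hedged sketch): if the rewritten argument is also forwarded as
    // callback argument CBArgNo of a broker call, the callback callee's
    // corresponding argument must privatize to the very same type; otherwise
    // the signature rewrite and the callback would disagree.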
    auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
      SmallVector<const Use *, 4> CallbackUses;
      AbstractCallSite::getCallbackUses(CB, CallbackUses);
      for (const Use *U : CallbackUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CB.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
              *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
            DepClassTy::REQUIRED);
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getInstruction()->getCalledFunction()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// Given a type to privatize \p PrivType, collect the constituents (which
  /// are used) in \p ReplacementTypes.
  static void
  identifyReplacementTypes(Type *PrivType,
                           SmallVectorImpl<Type *> &ReplacementTypes) {
    // TODO: For now we expand the privatization type to the fullest which can
    //       lead to dead arguments that need to be removed later.
    assert(PrivType && "Expected privatizable type!");

    // Traverse the type, extract constituent types on the outermost level.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
        ReplacementTypes.push_back(PrivStructType->getElementType(u));
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      ReplacementTypes.append(PrivArrayType->getNumElements(),
                              PrivArrayType->getElementType());
    } else {
      ReplacementTypes.push_back(PrivType);
    }
  }

  /// Initialize \p Base according to the type \p PrivType at position \p IP.
  /// The values needed are taken from the arguments of \p F starting at
  /// position \p ArgNo.
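  /// For example (illustrative only): for \p PrivType == { i32, i64 } the
  /// rewritten signature carries two scalar arguments, and this helper emits
  /// two offset-adjusted stores writing them back into the private \p Base.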
6589 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6590 unsigned ArgNo, Instruction &IP) { 6591 assert(PrivType && "Expected privatizable type!"); 6592 6593 IRBuilder<NoFolder> IRB(&IP); 6594 const DataLayout &DL = F.getParent()->getDataLayout(); 6595 6596 // Traverse the type, build GEPs and stores. 6597 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6598 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6599 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6600 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6601 Value *Ptr = 6602 constructPointer(PointeeTy, PrivType, &Base, 6603 PrivStructLayout->getElementOffset(u), IRB, DL); 6604 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6605 } 6606 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6607 Type *PointeeTy = PrivArrayType->getElementType(); 6608 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6609 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6610 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6611 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6612 u * PointeeTySize, IRB, DL); 6613 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6614 } 6615 } else { 6616 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6617 } 6618 } 6619 6620 /// Extract values from \p Base according to the type \p PrivType at the 6621 /// call position \p ACS. The values are appended to \p ReplacementValues. 6622 void createReplacementValues(Align Alignment, Type *PrivType, 6623 AbstractCallSite ACS, Value *Base, 6624 SmallVectorImpl<Value *> &ReplacementValues) { 6625 assert(Base && "Expected base value!"); 6626 assert(PrivType && "Expected privatizable type!"); 6627 Instruction *IP = ACS.getInstruction(); 6628 6629 IRBuilder<NoFolder> IRB(IP); 6630 const DataLayout &DL = IP->getModule()->getDataLayout(); 6631 6632 Type *PrivPtrType = PrivType->getPointerTo(); 6633 if (Base->getType() != PrivPtrType) 6634 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivPtrType, "", 6635 ACS.getInstruction()); 6636 6637 // Traverse the type, build GEPs and loads. 6638 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6639 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6640 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6641 Type *PointeeTy = PrivStructType->getElementType(u); 6642 Value *Ptr = 6643 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6644 PrivStructLayout->getElementOffset(u), IRB, DL); 6645 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6646 L->setAlignment(Alignment); 6647 ReplacementValues.push_back(L); 6648 } 6649 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6650 Type *PointeeTy = PrivArrayType->getElementType(); 6651 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6652 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6653 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6654 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6655 u * PointeeTySize, IRB, DL); 6656 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6657 L->setAlignment(Alignment); 6658 ReplacementValues.push_back(L); 6659 } 6660 } else { 6661 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6662 L->setAlignment(Alignment); 6663 ReplacementValues.push_back(L); 6664 } 6665 } 6666 6667 /// See AbstractAttribute::manifest(...) 
6668 ChangeStatus manifest(Attributor &A) override { 6669 if (!PrivatizableType.hasValue()) 6670 return ChangeStatus::UNCHANGED; 6671 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6672 6673 // Collect all tail calls in the function as we cannot allow new allocas to 6674 // escape into tail recursion. 6675 // TODO: Be smarter about new allocas escaping into tail calls. 6676 SmallVector<CallInst *, 16> TailCalls; 6677 bool UsedAssumedInformation = false; 6678 if (!A.checkForAllInstructions( 6679 [&](Instruction &I) { 6680 CallInst &CI = cast<CallInst>(I); 6681 if (CI.isTailCall()) 6682 TailCalls.push_back(&CI); 6683 return true; 6684 }, 6685 *this, {Instruction::Call}, UsedAssumedInformation)) 6686 return ChangeStatus::UNCHANGED; 6687 6688 Argument *Arg = getAssociatedArgument(); 6689 // Query AAAlign attribute for alignment of associated argument to 6690 // determine the best alignment of loads. 6691 const auto &AlignAA = 6692 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6693 6694 // Callback to repair the associated function. A new alloca is placed at the 6695 // beginning and initialized with the values passed through arguments. The 6696 // new alloca replaces the use of the old pointer argument. 6697 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6698 [=](const Attributor::ArgumentReplacementInfo &ARI, 6699 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6700 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6701 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6702 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0, 6703 Arg->getName() + ".priv", IP); 6704 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6705 ArgIt->getArgNo(), *IP); 6706 6707 if (AI->getType() != Arg->getType()) 6708 AI = 6709 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP); 6710 Arg->replaceAllUsesWith(AI); 6711 6712 for (CallInst *CI : TailCalls) 6713 CI->setTailCall(false); 6714 }; 6715 6716 // Callback to repair a call site of the associated function. The elements 6717 // of the privatizable type are loaded prior to the call and passed to the 6718 // new function version. 6719 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6720 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6721 AbstractCallSite ACS, 6722 SmallVectorImpl<Value *> &NewArgOperands) { 6723 // When no alignment is specified for the load instruction, 6724 // natural alignment is assumed. 6725 createReplacementValues( 6726 assumeAligned(AlignAA.getAssumedAlign()), 6727 PrivatizableType.getValue(), ACS, 6728 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6729 NewArgOperands); 6730 }; 6731 6732 // Collect the types that will replace the privatizable type in the function 6733 // signature. 6734 SmallVector<Type *, 16> ReplacementTypes; 6735 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6736 6737 // Register a rewrite of the argument. 
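    // Illustrative sketch of the overall effect (assuming a { i32, i64 }
    // privatizable type; nothing here is emitted verbatim):
    //   before: define void @f({ i32, i64 }* %arg)
    //   after : define void @f(i32 %arg.0, i64 %arg.1)
    // FnRepairCB materializes the private alloca in the new entry block and
    // ACSRepairCB loads the two scalars in front of every call site.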
6738 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6739 std::move(FnRepairCB), 6740 std::move(ACSRepairCB))) 6741 return ChangeStatus::CHANGED; 6742 return ChangeStatus::UNCHANGED; 6743 } 6744 6745 /// See AbstractAttribute::trackStatistics() 6746 void trackStatistics() const override { 6747 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6748 } 6749 }; 6750 6751 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6752 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6753 : AAPrivatizablePtrImpl(IRP, A) {} 6754 6755 /// See AbstractAttribute::initialize(...). 6756 virtual void initialize(Attributor &A) override { 6757 // TODO: We can privatize more than arguments. 6758 indicatePessimisticFixpoint(); 6759 } 6760 6761 ChangeStatus updateImpl(Attributor &A) override { 6762 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6763 "updateImpl will not be called"); 6764 } 6765 6766 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6767 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6768 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6769 if (!Obj) { 6770 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6771 return nullptr; 6772 } 6773 6774 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6775 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6776 if (CI->isOne()) 6777 return AI->getAllocatedType(); 6778 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6779 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6780 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6781 if (PrivArgAA.isAssumedPrivatizablePtr()) 6782 return Obj->getType()->getPointerElementType(); 6783 } 6784 6785 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6786 "alloca nor privatizable argument: " 6787 << *Obj << "!\n"); 6788 return nullptr; 6789 } 6790 6791 /// See AbstractAttribute::trackStatistics() 6792 void trackStatistics() const override { 6793 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6794 } 6795 }; 6796 6797 struct AAPrivatizablePtrCallSiteArgument final 6798 : public AAPrivatizablePtrFloating { 6799 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6800 : AAPrivatizablePtrFloating(IRP, A) {} 6801 6802 /// See AbstractAttribute::initialize(...). 6803 void initialize(Attributor &A) override { 6804 if (getIRPosition().hasAttr(Attribute::ByVal)) 6805 indicateOptimisticFixpoint(); 6806 } 6807 6808 /// See AbstractAttribute::updateImpl(...). 
6809 ChangeStatus updateImpl(Attributor &A) override { 6810 PrivatizableType = identifyPrivatizableType(A); 6811 if (!PrivatizableType.hasValue()) 6812 return ChangeStatus::UNCHANGED; 6813 if (!PrivatizableType.getValue()) 6814 return indicatePessimisticFixpoint(); 6815 6816 const IRPosition &IRP = getIRPosition(); 6817 auto &NoCaptureAA = 6818 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 6819 if (!NoCaptureAA.isAssumedNoCapture()) { 6820 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 6821 return indicatePessimisticFixpoint(); 6822 } 6823 6824 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 6825 if (!NoAliasAA.isAssumedNoAlias()) { 6826 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 6827 return indicatePessimisticFixpoint(); 6828 } 6829 6830 const auto &MemBehaviorAA = 6831 A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED); 6832 if (!MemBehaviorAA.isAssumedReadOnly()) { 6833 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 6834 return indicatePessimisticFixpoint(); 6835 } 6836 6837 return ChangeStatus::UNCHANGED; 6838 } 6839 6840 /// See AbstractAttribute::trackStatistics() 6841 void trackStatistics() const override { 6842 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 6843 } 6844 }; 6845 6846 struct AAPrivatizablePtrCallSiteReturned final 6847 : public AAPrivatizablePtrFloating { 6848 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 6849 : AAPrivatizablePtrFloating(IRP, A) {} 6850 6851 /// See AbstractAttribute::initialize(...). 6852 void initialize(Attributor &A) override { 6853 // TODO: We can privatize more than arguments. 6854 indicatePessimisticFixpoint(); 6855 } 6856 6857 /// See AbstractAttribute::trackStatistics() 6858 void trackStatistics() const override { 6859 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 6860 } 6861 }; 6862 6863 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 6864 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 6865 : AAPrivatizablePtrFloating(IRP, A) {} 6866 6867 /// See AbstractAttribute::initialize(...). 6868 void initialize(Attributor &A) override { 6869 // TODO: We can privatize more than arguments. 6870 indicatePessimisticFixpoint(); 6871 } 6872 6873 /// See AbstractAttribute::trackStatistics() 6874 void trackStatistics() const override { 6875 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 6876 } 6877 }; 6878 6879 /// -------------------- Memory Behavior Attributes ---------------------------- 6880 /// Includes read-none, read-only, and write-only. 6881 /// ---------------------------------------------------------------------------- 6882 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 6883 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 6884 : AAMemoryBehavior(IRP, A) {} 6885 6886 /// See AbstractAttribute::initialize(...). 6887 void initialize(Attributor &A) override { 6888 intersectAssumedBits(BEST_STATE); 6889 getKnownStateFromValue(getIRPosition(), getState()); 6890 AAMemoryBehavior::initialize(A); 6891 } 6892 6893 /// Return the memory behavior information encoded in the IR for \p IRP. 
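  /// For example, an existing `readonly` attribute adds the NO_WRITES bits to
  /// \p State, `writeonly` adds NO_READS, and `readnone` adds both (see the
  /// switch below).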
6894 static void getKnownStateFromValue(const IRPosition &IRP, 6895 BitIntegerState &State, 6896 bool IgnoreSubsumingPositions = false) { 6897 SmallVector<Attribute, 2> Attrs; 6898 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6899 for (const Attribute &Attr : Attrs) { 6900 switch (Attr.getKindAsEnum()) { 6901 case Attribute::ReadNone: 6902 State.addKnownBits(NO_ACCESSES); 6903 break; 6904 case Attribute::ReadOnly: 6905 State.addKnownBits(NO_WRITES); 6906 break; 6907 case Attribute::WriteOnly: 6908 State.addKnownBits(NO_READS); 6909 break; 6910 default: 6911 llvm_unreachable("Unexpected attribute!"); 6912 } 6913 } 6914 6915 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 6916 if (!I->mayReadFromMemory()) 6917 State.addKnownBits(NO_READS); 6918 if (!I->mayWriteToMemory()) 6919 State.addKnownBits(NO_WRITES); 6920 } 6921 } 6922 6923 /// See AbstractAttribute::getDeducedAttributes(...). 6924 void getDeducedAttributes(LLVMContext &Ctx, 6925 SmallVectorImpl<Attribute> &Attrs) const override { 6926 assert(Attrs.size() == 0); 6927 if (isAssumedReadNone()) 6928 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6929 else if (isAssumedReadOnly()) 6930 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 6931 else if (isAssumedWriteOnly()) 6932 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 6933 assert(Attrs.size() <= 1); 6934 } 6935 6936 /// See AbstractAttribute::manifest(...). 6937 ChangeStatus manifest(Attributor &A) override { 6938 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 6939 return ChangeStatus::UNCHANGED; 6940 6941 const IRPosition &IRP = getIRPosition(); 6942 6943 // Check if we would improve the existing attributes first. 6944 SmallVector<Attribute, 4> DeducedAttrs; 6945 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6946 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6947 return IRP.hasAttr(Attr.getKindAsEnum(), 6948 /* IgnoreSubsumingPositions */ true); 6949 })) 6950 return ChangeStatus::UNCHANGED; 6951 6952 // Clear existing attributes. 6953 IRP.removeAttrs(AttrKinds); 6954 6955 // Use the generic manifest method. 6956 return IRAttribute::manifest(A); 6957 } 6958 6959 /// See AbstractState::getAsStr(). 6960 const std::string getAsStr() const override { 6961 if (isAssumedReadNone()) 6962 return "readnone"; 6963 if (isAssumedReadOnly()) 6964 return "readonly"; 6965 if (isAssumedWriteOnly()) 6966 return "writeonly"; 6967 return "may-read/write"; 6968 } 6969 6970 /// The set of IR attributes AAMemoryBehavior deals with. 6971 static const Attribute::AttrKind AttrKinds[3]; 6972 }; 6973 6974 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 6975 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 6976 6977 /// Memory behavior attribute for a floating value. 6978 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 6979 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 6980 : AAMemoryBehaviorImpl(IRP, A) {} 6981 6982 /// See AbstractAttribute::updateImpl(...). 
6983 ChangeStatus updateImpl(Attributor &A) override; 6984 6985 /// See AbstractAttribute::trackStatistics() 6986 void trackStatistics() const override { 6987 if (isAssumedReadNone()) 6988 STATS_DECLTRACK_FLOATING_ATTR(readnone) 6989 else if (isAssumedReadOnly()) 6990 STATS_DECLTRACK_FLOATING_ATTR(readonly) 6991 else if (isAssumedWriteOnly()) 6992 STATS_DECLTRACK_FLOATING_ATTR(writeonly) 6993 } 6994 6995 private: 6996 /// Return true if users of \p UserI might access the underlying 6997 /// variable/location described by \p U and should therefore be analyzed. 6998 bool followUsersOfUseIn(Attributor &A, const Use &U, 6999 const Instruction *UserI); 7000 7001 /// Update the state according to the effect of use \p U in \p UserI. 7002 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI); 7003 }; 7004 7005 /// Memory behavior attribute for function argument. 7006 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating { 7007 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A) 7008 : AAMemoryBehaviorFloating(IRP, A) {} 7009 7010 /// See AbstractAttribute::initialize(...). 7011 void initialize(Attributor &A) override { 7012 intersectAssumedBits(BEST_STATE); 7013 const IRPosition &IRP = getIRPosition(); 7014 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we 7015 // can query it when we use has/getAttr. That would allow us to reuse the 7016 // initialize of the base class here. 7017 bool HasByVal = 7018 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true); 7019 getKnownStateFromValue(IRP, getState(), 7020 /* IgnoreSubsumingPositions */ HasByVal); 7021 7022 // Initialize the use vector with all direct uses of the associated value. 7023 Argument *Arg = getAssociatedArgument(); 7024 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) 7025 indicatePessimisticFixpoint(); 7026 } 7027 7028 ChangeStatus manifest(Attributor &A) override { 7029 // TODO: Pointer arguments are not supported on vectors of pointers yet. 7030 if (!getAssociatedValue().getType()->isPointerTy()) 7031 return ChangeStatus::UNCHANGED; 7032 7033 // TODO: From readattrs.ll: "inalloca parameters are always 7034 // considered written" 7035 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) { 7036 removeKnownBits(NO_WRITES); 7037 removeAssumedBits(NO_WRITES); 7038 } 7039 return AAMemoryBehaviorFloating::manifest(A); 7040 } 7041 7042 /// See AbstractAttribute::trackStatistics() 7043 void trackStatistics() const override { 7044 if (isAssumedReadNone()) 7045 STATS_DECLTRACK_ARG_ATTR(readnone) 7046 else if (isAssumedReadOnly()) 7047 STATS_DECLTRACK_ARG_ATTR(readonly) 7048 else if (isAssumedWriteOnly()) 7049 STATS_DECLTRACK_ARG_ATTR(writeonly) 7050 } 7051 }; 7052 7053 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument { 7054 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A) 7055 : AAMemoryBehaviorArgument(IRP, A) {} 7056 7057 /// See AbstractAttribute::initialize(...). 7058 void initialize(Attributor &A) override { 7059 // If we don't have an associated attribute this is either a variadic call 7060 // or an indirect call, either way, nothing to do here. 
    Argument *Arg = getAssociatedArgument();
    if (!Arg) {
      indicatePessimisticFixpoint();
      return;
    }
    if (Arg->hasByValAttr()) {
      addKnownBits(NO_WRITES);
      removeKnownBits(NO_READS);
      removeAssumedBits(NO_READS);
    }
    AAMemoryBehaviorArgument::initialize(A);
    if (getAssociatedFunction()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CSARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CSARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CSARG_ATTR(writeonly)
  }
};

/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Function &F = cast<Function>(getAnchorValue());
    if (isAssumedReadNone()) {
      F.removeFnAttr(Attribute::ArgMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }
    return AAMemoryBehaviorImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};

/// AAMemoryBehavior attribute for call sites.
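/// A call site can be no better than its callee: updateImpl below simply
/// clamps the local state against the callee's function-level memory
/// behavior, e.g., a call to a function deduced `readonly` is itself at most
/// `readonly`.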
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call sites instead of redirecting
    //       requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};

ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has an own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      intersectAssumedBits(MemBehaviorAA.getAssumed());
      return !isAtFixpoint();
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    return !isAtFixpoint();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                          UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA =
        A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Make sure the value is not captured (except through "return"), if
  // it is, any information derived would be irrelevant anyway as we cannot
  // check the potential aliases introduced by the capture. However, no need
  // to fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA =
      A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                          : ChangeStatus::UNCHANGED;
  }

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  auto UsePred = [&](const Use &U, bool &Follow) -> bool {
    Instruction *UserI = cast<Instruction>(U.getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
                      << "\n");

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      return true;

    // Check if the users of UserI should also be visited.
    Follow = followUsersOfUseIn(A, U, UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);

    return !isAtFixpoint();
  };

  if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U,
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(&U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
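  // E.g. (illustrative): given `%q = call i8* @pass_through(i8* %p)` where %p
  // is nocapture but possibly returned, %q may alias %p and the users of %q
  // must still be visited; only a fully nocapture argument lets us stop here.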
7305 if (U.get()->getType()->isPointerTy()) { 7306 unsigned ArgNo = CB->getArgOperandNo(&U); 7307 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 7308 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); 7309 return !ArgNoCaptureAA.isAssumedNoCapture(); 7310 } 7311 7312 return true; 7313 } 7314 7315 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, 7316 const Instruction *UserI) { 7317 assert(UserI->mayReadOrWriteMemory()); 7318 7319 switch (UserI->getOpcode()) { 7320 default: 7321 // TODO: Handle all atomics and other side-effect operations we know of. 7322 break; 7323 case Instruction::Load: 7324 // Loads cause the NO_READS property to disappear. 7325 removeAssumedBits(NO_READS); 7326 return; 7327 7328 case Instruction::Store: 7329 // Stores cause the NO_WRITES property to disappear if the use is the 7330 // pointer operand. Note that while capturing was taken care of somewhere 7331 // else we need to deal with stores of the value that is not looked through. 7332 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) 7333 removeAssumedBits(NO_WRITES); 7334 else 7335 indicatePessimisticFixpoint(); 7336 return; 7337 7338 case Instruction::Call: 7339 case Instruction::CallBr: 7340 case Instruction::Invoke: { 7341 // For call sites we look at the argument memory behavior attribute (this 7342 // could be recursive!) in order to restrict our own state. 7343 const auto *CB = cast<CallBase>(UserI); 7344 7345 // Give up on operand bundles. 7346 if (CB->isBundleOperand(&U)) { 7347 indicatePessimisticFixpoint(); 7348 return; 7349 } 7350 7351 // Calling a function does read the function pointer, maybe write it if the 7352 // function is self-modifying. 7353 if (CB->isCallee(&U)) { 7354 removeAssumedBits(NO_READS); 7355 break; 7356 } 7357 7358 // Adjust the possible access behavior based on the information on the 7359 // argument. 7360 IRPosition Pos; 7361 if (U.get()->getType()->isPointerTy()) 7362 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 7363 else 7364 Pos = IRPosition::callsite_function(*CB); 7365 const auto &MemBehaviorAA = 7366 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); 7367 // "assumed" has at most the same bits as the MemBehaviorAA assumed 7368 // and at least "known". 7369 intersectAssumedBits(MemBehaviorAA.getAssumed()); 7370 return; 7371 } 7372 }; 7373 7374 // Generally, look at the "may-properties" and adjust the assumed state if we 7375 // did not trigger special handling before. 
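  // E.g., an atomicrmw user has no special case above; it may both read and
  // write memory, so the two checks below clear NO_READS and NO_WRITES.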
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblemem_or_argmemonly
/// ----------------------------------------------------------------------------

std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}

namespace {
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      AccessKind2Accesses[u] = nullptr;
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      if (AccessKind2Accesses[u])
        AccessKind2Accesses[u]->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory location information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via
    // interprocedural constant propagation. It is unclear if this is the best
    // way but it is unlikely this will cause real performance problems. If we
    // are deriving attributes for the anchor function we even remove the
    // attribute in addition to ignoring it.
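    // E.g. (a hedged sketch): for an internal `argmemonly` function that is
    // only ever called with a global, say `f(@g)`, propagating @g into the
    // parameter turns argument accesses into direct global accesses, which
    // `argmemonly` does not cover anymore.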
7449 bool UseArgMemOnly = true; 7450 Function *AnchorFn = IRP.getAnchorScope(); 7451 if (AnchorFn && A.isRunOn(*AnchorFn)) 7452 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7453 7454 SmallVector<Attribute, 2> Attrs; 7455 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7456 for (const Attribute &Attr : Attrs) { 7457 switch (Attr.getKindAsEnum()) { 7458 case Attribute::ReadNone: 7459 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7460 break; 7461 case Attribute::InaccessibleMemOnly: 7462 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7463 break; 7464 case Attribute::ArgMemOnly: 7465 if (UseArgMemOnly) 7466 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7467 else 7468 IRP.removeAttrs({Attribute::ArgMemOnly}); 7469 break; 7470 case Attribute::InaccessibleMemOrArgMemOnly: 7471 if (UseArgMemOnly) 7472 State.addKnownBits(inverseLocation( 7473 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7474 else 7475 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7476 break; 7477 default: 7478 llvm_unreachable("Unexpected attribute!"); 7479 } 7480 } 7481 } 7482 7483 /// See AbstractAttribute::getDeducedAttributes(...). 7484 void getDeducedAttributes(LLVMContext &Ctx, 7485 SmallVectorImpl<Attribute> &Attrs) const override { 7486 assert(Attrs.size() == 0); 7487 if (isAssumedReadNone()) { 7488 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7489 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7490 if (isAssumedInaccessibleMemOnly()) 7491 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7492 else if (isAssumedArgMemOnly()) 7493 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7494 else if (isAssumedInaccessibleOrArgMemOnly()) 7495 Attrs.push_back( 7496 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7497 } 7498 assert(Attrs.size() <= 1); 7499 } 7500 7501 /// See AbstractAttribute::manifest(...). 7502 ChangeStatus manifest(Attributor &A) override { 7503 const IRPosition &IRP = getIRPosition(); 7504 7505 // Check if we would improve the existing attributes first. 7506 SmallVector<Attribute, 4> DeducedAttrs; 7507 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7508 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7509 return IRP.hasAttr(Attr.getKindAsEnum(), 7510 /* IgnoreSubsumingPositions */ true); 7511 })) 7512 return ChangeStatus::UNCHANGED; 7513 7514 // Clear existing attributes. 7515 IRP.removeAttrs(AttrKinds); 7516 if (isAssumedReadNone()) 7517 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7518 7519 // Use the generic manifest method. 7520 return IRAttribute::manifest(A); 7521 } 7522 7523 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
7524 bool checkForAllAccessesToMemoryKind( 7525 function_ref<bool(const Instruction *, const Value *, AccessKind, 7526 MemoryLocationsKind)> 7527 Pred, 7528 MemoryLocationsKind RequestedMLK) const override { 7529 if (!isValidState()) 7530 return false; 7531 7532 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 7533 if (AssumedMLK == NO_LOCATIONS) 7534 return true; 7535 7536 unsigned Idx = 0; 7537 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 7538 CurMLK *= 2, ++Idx) { 7539 if (CurMLK & RequestedMLK) 7540 continue; 7541 7542 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 7543 for (const AccessInfo &AI : *Accesses) 7544 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 7545 return false; 7546 } 7547 7548 return true; 7549 } 7550 7551 ChangeStatus indicatePessimisticFixpoint() override { 7552 // If we give up and indicate a pessimistic fixpoint this instruction will 7553 // become an access for all potential access kinds: 7554 // TODO: Add pointers for argmemonly and globals to improve the results of 7555 // checkForAllAccessesToMemoryKind. 7556 bool Changed = false; 7557 MemoryLocationsKind KnownMLK = getKnown(); 7558 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 7559 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 7560 if (!(CurMLK & KnownMLK)) 7561 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 7562 getAccessKindFromInst(I)); 7563 return AAMemoryLocation::indicatePessimisticFixpoint(); 7564 } 7565 7566 protected: 7567 /// Helper struct to tie together an instruction that has a read or write 7568 /// effect with the pointer it accesses (if any). 7569 struct AccessInfo { 7570 7571 /// The instruction that caused the access. 7572 const Instruction *I; 7573 7574 /// The base pointer that is accessed, or null if unknown. 7575 const Value *Ptr; 7576 7577 /// The kind of access (read/write/read+write). 7578 AccessKind Kind; 7579 7580 bool operator==(const AccessInfo &RHS) const { 7581 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 7582 } 7583 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 7584 if (LHS.I != RHS.I) 7585 return LHS.I < RHS.I; 7586 if (LHS.Ptr != RHS.Ptr) 7587 return LHS.Ptr < RHS.Ptr; 7588 if (LHS.Kind != RHS.Kind) 7589 return LHS.Kind < RHS.Kind; 7590 return false; 7591 } 7592 }; 7593 7594 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 7595 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 7596 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 7597 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 7598 7599 /// Categorize the pointer arguments of CB that might access memory in 7600 /// AccessedLoc and update the state and access map accordingly. 7601 void 7602 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB, 7603 AAMemoryLocation::StateType &AccessedLocs, 7604 bool &Changed); 7605 7606 /// Return the kind(s) of location that may be accessed by \p V. 7607 AAMemoryLocation::MemoryLocationsKind 7608 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 7609 7610 /// Return the access kind as determined by \p I. 7611 AccessKind getAccessKindFromInst(const Instruction *I) { 7612 AccessKind AK = READ_WRITE; 7613 if (I) { 7614 AK = I->mayReadFromMemory() ? READ : NONE; 7615 AK = AccessKind(AK | (I->mayWriteToMemory() ? 
WRITE : NONE));
    }
    return AK;
  }

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }

  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  SmallVector<Value *, 8> Objects;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
    return;
  }

  for (Value *Obj : Objects) {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies
      //       performed on the call edge, though, we should. To make that
      //       happen we need to teach various passes, e.g., DSE, about the
      //       copy effect of a byval. That would also allow us to mark
      //       functions only accessing byval arguments as readnone again,
      //       arguably their accesses have no effect outside of the function,
      //       like accesses to allocas.
      MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we do not either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
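      // E.g., a load whose underlying object is a `constant` global does not
      // mark any global location as accessed; such objects are skipped below.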
      if (auto *GVar = dyn_cast<GlobalVariable>(GV))
        if (GVar->isConstant())
          continue;

      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(Obj) &&
               !NullPointerIsDefined(getAssociatedFunction(),
                                     Ptr.getType()->getPointerAddressSpace())) {
      continue;
    } else if (isa<AllocaInst>(Obj)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
      if (NoAliasAA.isAssumedNoAlias())
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
                      << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
                      << "\n");
    updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
                              getAccessKindFromInst(&I));
  }

  LLVM_DEBUG(
      dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
             << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
}

void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
    Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
    bool &Changed) {
  for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {

    // Skip non-pointer arguments.
    const Value *ArgOp = CB.getArgOperand(ArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      continue;

    // Skip readnone arguments.
    const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
    const auto &ArgOpMemLocationAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);

    if (ArgOpMemLocationAA.isAssumedReadNone())
      continue;

    // Categorize potentially accessed pointer arguments as if there were an
    // access instruction with them as the pointer operand.
    categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
  }
}

AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if we assume any accessed memory is visible.
    const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
        *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");

    if (CBMemLocationAA.isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA.getAssumedNotAccessedLocation();

    // Set the argmemonly and global bits as we handle them separately below.
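    // For example, if the callee is assumed argmemonly, every bit but
    // NO_ARGUMENT_MEM is set in CBAssumedNotAccessedLocs. Forcing the
    // argument and global bits into the mask below keeps the generic loop
    // from recording these kinds without pointer information; they are
    // re-added with concrete pointer operands further down.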
7777 uint32_t CBAssumedNotAccessedLocsNoArgMem = 7778 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 7779 7780 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 7781 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 7782 continue; 7783 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 7784 getAccessKindFromInst(&I)); 7785 } 7786 7787 // Now handle global memory if it might be accessed. This is slightly tricky 7788 // as NO_GLOBAL_MEM has multiple bits set. 7789 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 7790 if (HasGlobalAccesses) { 7791 auto AccessPred = [&](const Instruction *, const Value *Ptr, 7792 AccessKind Kind, MemoryLocationsKind MLK) { 7793 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 7794 getAccessKindFromInst(&I)); 7795 return true; 7796 }; 7797 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 7798 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 7799 return AccessedLocs.getWorstState(); 7800 } 7801 7802 LLVM_DEBUG( 7803 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 7804 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7805 7806 // Now handle argument memory if it might be accessed. 7807 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 7808 if (HasArgAccesses) 7809 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed); 7810 7811 LLVM_DEBUG( 7812 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 7813 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7814 7815 return AccessedLocs.getAssumed(); 7816 } 7817 7818 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 7819 LLVM_DEBUG( 7820 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 7821 << I << " [" << *Ptr << "]\n"); 7822 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 7823 return AccessedLocs.getAssumed(); 7824 } 7825 7826 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 7827 << I << "\n"); 7828 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 7829 getAccessKindFromInst(&I)); 7830 return AccessedLocs.getAssumed(); 7831 } 7832 7833 /// An AA to represent the memory behavior function attributes. 7834 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 7835 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 7836 : AAMemoryLocationImpl(IRP, A) {} 7837 7838 /// See AbstractAttribute::updateImpl(Attributor &A). 7839 virtual ChangeStatus updateImpl(Attributor &A) override { 7840 7841 const auto &MemBehaviorAA = 7842 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 7843 if (MemBehaviorAA.isAssumedReadNone()) { 7844 if (MemBehaviorAA.isKnownReadNone()) 7845 return indicateOptimisticFixpoint(); 7846 assert(isAssumedReadNone() && 7847 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 7848 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 7849 return ChangeStatus::UNCHANGED; 7850 } 7851 7852 // The current assumed state used to determine a change. 
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // i.e., once we do not actually exclude any memory locations in the
      // state anymore.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                            UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryLocationImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};

/// ------------------ Value Constant Range Attribute -------------------------

struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromSCEV(Attributor &A,
                           const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI));
  }

  /// Return true if \p CtxI is valid for querying outside analyses.
  /// This basically makes sure we do not ask intra-procedural analyses
  /// about a context in the wrong function or a context that violates
  /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
  /// if the original context of this AA is OK or should be considered invalid.
  bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
                                               const Instruction *CtxI,
                                               bool AllowAACtxI) const {
    if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
      return false;

    // Our context might be in a different function, which neither of the
    // intra-procedural analyses (ScalarEvolution, LazyValueInfo) can handle.
    if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
      return false;

    // If the context is not dominated by the value there are paths to the
    // context that do not define the value.
    // This cannot be handled by LazyValueInfo, so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in the IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    //       we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata.
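  /// For example, an assumed range of [0, 10) for a load would be manifested
  /// (assuming no better existing annotation) as
  ///   %v = load i32, i32* %p, !range !0
  ///   ...
  ///   !0 = !{i32 0, i32 10}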
8120 static bool 8121 setRangeMetadataIfisBetterRange(Instruction *I, 8122 const ConstantRange &AssumedConstantRange) { 8123 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 8124 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 8125 if (!AssumedConstantRange.isEmptySet()) { 8126 I->setMetadata(LLVMContext::MD_range, 8127 getMDNodeForConstantRange(I->getType(), I->getContext(), 8128 AssumedConstantRange)); 8129 return true; 8130 } 8131 } 8132 return false; 8133 } 8134 8135 /// See AbstractAttribute::manifest() 8136 ChangeStatus manifest(Attributor &A) override { 8137 ChangeStatus Changed = ChangeStatus::UNCHANGED; 8138 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 8139 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 8140 8141 auto &V = getAssociatedValue(); 8142 if (!AssumedConstantRange.isEmptySet() && 8143 !AssumedConstantRange.isSingleElement()) { 8144 if (Instruction *I = dyn_cast<Instruction>(&V)) { 8145 assert(I == getCtxI() && "Should not annotate an instruction which is " 8146 "not the context instruction"); 8147 if (isa<CallInst>(I) || isa<LoadInst>(I)) 8148 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 8149 Changed = ChangeStatus::CHANGED; 8150 } 8151 } 8152 8153 return Changed; 8154 } 8155 }; 8156 8157 struct AAValueConstantRangeArgument final 8158 : AAArgumentFromCallSiteArguments< 8159 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8160 true /* BridgeCallBaseContext */> { 8161 using Base = AAArgumentFromCallSiteArguments< 8162 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8163 true /* BridgeCallBaseContext */>; 8164 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 8165 : Base(IRP, A) {} 8166 8167 /// See AbstractAttribute::initialize(..). 8168 void initialize(Attributor &A) override { 8169 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8170 indicatePessimisticFixpoint(); 8171 } else { 8172 Base::initialize(A); 8173 } 8174 } 8175 8176 /// See AbstractAttribute::trackStatistics() 8177 void trackStatistics() const override { 8178 STATS_DECLTRACK_ARG_ATTR(value_range) 8179 } 8180 }; 8181 8182 struct AAValueConstantRangeReturned 8183 : AAReturnedFromReturnedValues<AAValueConstantRange, 8184 AAValueConstantRangeImpl, 8185 AAValueConstantRangeImpl::StateType, 8186 /* PropogateCallBaseContext */ true> { 8187 using Base = 8188 AAReturnedFromReturnedValues<AAValueConstantRange, 8189 AAValueConstantRangeImpl, 8190 AAValueConstantRangeImpl::StateType, 8191 /* PropogateCallBaseContext */ true>; 8192 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 8193 : Base(IRP, A) {} 8194 8195 /// See AbstractAttribute::initialize(...). 8196 void initialize(Attributor &A) override {} 8197 8198 /// See AbstractAttribute::trackStatistics() 8199 void trackStatistics() const override { 8200 STATS_DECLTRACK_FNRET_ATTR(value_range) 8201 } 8202 }; 8203 8204 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 8205 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 8206 : AAValueConstantRangeImpl(IRP, A) {} 8207 8208 /// See AbstractAttribute::initialize(...). 
8209 void initialize(Attributor &A) override { 8210 AAValueConstantRangeImpl::initialize(A); 8211 if (isAtFixpoint()) 8212 return; 8213 8214 Value &V = getAssociatedValue(); 8215 8216 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8217 unionAssumed(ConstantRange(C->getValue())); 8218 indicateOptimisticFixpoint(); 8219 return; 8220 } 8221 8222 if (isa<UndefValue>(&V)) { 8223 // Collapse the undef state to 0. 8224 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 8225 indicateOptimisticFixpoint(); 8226 return; 8227 } 8228 8229 if (isa<CallBase>(&V)) 8230 return; 8231 8232 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 8233 return; 8234 8235 // If it is a load instruction with range metadata, use it. 8236 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 8237 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 8238 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8239 return; 8240 } 8241 8242 // We can work with PHI and select instruction as we traverse their operands 8243 // during update. 8244 if (isa<SelectInst>(V) || isa<PHINode>(V)) 8245 return; 8246 8247 // Otherwise we give up. 8248 indicatePessimisticFixpoint(); 8249 8250 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 8251 << getAssociatedValue() << "\n"); 8252 } 8253 8254 bool calculateBinaryOperator( 8255 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 8256 const Instruction *CtxI, 8257 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8258 Value *LHS = BinOp->getOperand(0); 8259 Value *RHS = BinOp->getOperand(1); 8260 8261 // Simplify the operands first. 8262 bool UsedAssumedInformation = false; 8263 const auto &SimplifiedLHS = 8264 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8265 *this, UsedAssumedInformation); 8266 if (!SimplifiedLHS.hasValue()) 8267 return true; 8268 if (!SimplifiedLHS.getValue()) 8269 return false; 8270 LHS = *SimplifiedLHS; 8271 8272 const auto &SimplifiedRHS = 8273 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8274 *this, UsedAssumedInformation); 8275 if (!SimplifiedRHS.hasValue()) 8276 return true; 8277 if (!SimplifiedRHS.getValue()) 8278 return false; 8279 RHS = *SimplifiedRHS; 8280 8281 // TODO: Allow non integers as well. 8282 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8283 return false; 8284 8285 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8286 *this, IRPosition::value(*LHS, getCallBaseContext()), 8287 DepClassTy::REQUIRED); 8288 QuerriedAAs.push_back(&LHSAA); 8289 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8290 8291 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8292 *this, IRPosition::value(*RHS, getCallBaseContext()), 8293 DepClassTy::REQUIRED); 8294 QuerriedAAs.push_back(&RHSAA); 8295 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8296 8297 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 8298 8299 T.unionAssumed(AssumedRange); 8300 8301 // TODO: Track a known state too. 8302 8303 return T.isValidState(); 8304 } 8305 8306 bool calculateCastInst( 8307 Attributor &A, CastInst *CastI, IntegerRangeState &T, 8308 const Instruction *CtxI, 8309 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8310 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 8311 // TODO: Allow non integers as well. 8312 Value *OpV = CastI->getOperand(0); 8313 8314 // Simplify the operand first. 
8315 bool UsedAssumedInformation = false; 8316 const auto &SimplifiedOpV = 8317 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()), 8318 *this, UsedAssumedInformation); 8319 if (!SimplifiedOpV.hasValue()) 8320 return true; 8321 if (!SimplifiedOpV.getValue()) 8322 return false; 8323 OpV = *SimplifiedOpV; 8324 8325 if (!OpV->getType()->isIntegerTy()) 8326 return false; 8327 8328 auto &OpAA = A.getAAFor<AAValueConstantRange>( 8329 *this, IRPosition::value(*OpV, getCallBaseContext()), 8330 DepClassTy::REQUIRED); 8331 QuerriedAAs.push_back(&OpAA); 8332 T.unionAssumed( 8333 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 8334 return T.isValidState(); 8335 } 8336 8337 bool 8338 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 8339 const Instruction *CtxI, 8340 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8341 Value *LHS = CmpI->getOperand(0); 8342 Value *RHS = CmpI->getOperand(1); 8343 8344 // Simplify the operands first. 8345 bool UsedAssumedInformation = false; 8346 const auto &SimplifiedLHS = 8347 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8348 *this, UsedAssumedInformation); 8349 if (!SimplifiedLHS.hasValue()) 8350 return true; 8351 if (!SimplifiedLHS.getValue()) 8352 return false; 8353 LHS = *SimplifiedLHS; 8354 8355 const auto &SimplifiedRHS = 8356 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8357 *this, UsedAssumedInformation); 8358 if (!SimplifiedRHS.hasValue()) 8359 return true; 8360 if (!SimplifiedRHS.getValue()) 8361 return false; 8362 RHS = *SimplifiedRHS; 8363 8364 // TODO: Allow non integers as well. 8365 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8366 return false; 8367 8368 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8369 *this, IRPosition::value(*LHS, getCallBaseContext()), 8370 DepClassTy::REQUIRED); 8371 QuerriedAAs.push_back(&LHSAA); 8372 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8373 *this, IRPosition::value(*RHS, getCallBaseContext()), 8374 DepClassTy::REQUIRED); 8375 QuerriedAAs.push_back(&RHSAA); 8376 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8377 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8378 8379 // If one of them is empty set, we can't decide. 8380 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 8381 return true; 8382 8383 bool MustTrue = false, MustFalse = false; 8384 8385 auto AllowedRegion = 8386 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 8387 8388 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 8389 MustFalse = true; 8390 8391 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange)) 8392 MustTrue = true; 8393 8394 assert((!MustTrue || !MustFalse) && 8395 "Either MustTrue or MustFalse should be false!"); 8396 8397 if (MustTrue) 8398 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 8399 else if (MustFalse) 8400 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 8401 else 8402 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 8403 8404 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 8405 << " " << RHSAA << "\n"); 8406 8407 // TODO: Track a known state too. 8408 return T.isValidState(); 8409 } 8410 8411 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // Simplify the operand first.
        bool UsedAssumedInformation = false;
        const auto &SimplifiedOpV =
            A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
                                   *this, UsedAssumedInformation);
        if (!SimplifiedOpV.hasValue())
          return true;
        if (!SimplifiedOpV.getValue())
          return false;
        Value *VPtr = *SimplifiedOpV;

        // If the value is not an instruction, we ask the Attributor for the
        // AA of the value.
        const auto &AA = A.getAAFor<AAValueConstantRange>(
            *this, IRPosition::value(*VPtr, getCallBaseContext()),
            DepClassTy::REQUIRED);

        // The clamp operator is not used here so that we can utilize the
        // program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions.
        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see
      //       also AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
                                                  VisitValueCB, getCtxI(),
                                                  /* UseValueSimplify */ false))
      return indicatePessimisticFixpoint();

    // Ensure that long def-use chains can't cause circular reasoning either by
    // introducing a cutoff below.
    if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
      return ChangeStatus::UNCHANGED;
    if (++NumChanges > MaxNumChanges) {
      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
                        << " changes but only " << MaxNumChanges
                        << " are allowed to avoid cyclic reasoning.\n");
      return indicatePessimisticFixpoint();
    }
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }

  /// Tracker to bail after too many widening steps of the constant range.
  int NumChanges = 0;

  /// Upper bound for the number of allowed changes (=widening steps) for the
  /// constant range before we give up.
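  /// As an illustration, a value in a cycle, e.g., a loop counter, can widen
  /// its assumed range in every update step ([0,1), then [0,2), ...); the
  /// cutoff trades precision for guaranteed termination in such cases.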
8503 static constexpr int MaxNumChanges = 5; 8504 }; 8505 8506 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 8507 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 8508 : AAValueConstantRangeImpl(IRP, A) {} 8509 8510 /// See AbstractAttribute::initialize(...). 8511 ChangeStatus updateImpl(Attributor &A) override { 8512 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 8513 "not be called"); 8514 } 8515 8516 /// See AbstractAttribute::trackStatistics() 8517 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 8518 }; 8519 8520 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 8521 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 8522 : AAValueConstantRangeFunction(IRP, A) {} 8523 8524 /// See AbstractAttribute::trackStatistics() 8525 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 8526 }; 8527 8528 struct AAValueConstantRangeCallSiteReturned 8529 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8530 AAValueConstantRangeImpl, 8531 AAValueConstantRangeImpl::StateType, 8532 /* IntroduceCallBaseContext */ true> { 8533 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 8534 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8535 AAValueConstantRangeImpl, 8536 AAValueConstantRangeImpl::StateType, 8537 /* IntroduceCallBaseContext */ true>(IRP, 8538 A) { 8539 } 8540 8541 /// See AbstractAttribute::initialize(...). 8542 void initialize(Attributor &A) override { 8543 // If it is a load instruction with range metadata, use the metadata. 8544 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 8545 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 8546 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8547 8548 AAValueConstantRangeImpl::initialize(A); 8549 } 8550 8551 /// See AbstractAttribute::trackStatistics() 8552 void trackStatistics() const override { 8553 STATS_DECLTRACK_CSRET_ATTR(value_range) 8554 } 8555 }; 8556 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 8557 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 8558 : AAValueConstantRangeFloating(IRP, A) {} 8559 8560 /// See AbstractAttribute::manifest() 8561 ChangeStatus manifest(Attributor &A) override { 8562 return ChangeStatus::UNCHANGED; 8563 } 8564 8565 /// See AbstractAttribute::trackStatistics() 8566 void trackStatistics() const override { 8567 STATS_DECLTRACK_CSARG_ATTR(value_range) 8568 } 8569 }; 8570 8571 /// ------------------ Potential Values Attribute ------------------------- 8572 8573 struct AAPotentialValuesImpl : AAPotentialValues { 8574 using StateType = PotentialConstantIntValuesState; 8575 8576 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A) 8577 : AAPotentialValues(IRP, A) {} 8578 8579 /// See AbstractAttribute::initialize(..). 8580 void initialize(Attributor &A) override { 8581 if (A.hasSimplificationCallback(getIRPosition())) 8582 indicatePessimisticFixpoint(); 8583 else 8584 AAPotentialValues::initialize(A); 8585 } 8586 8587 /// See AbstractAttribute::getAsStr(). 8588 const std::string getAsStr() const override { 8589 std::string Str; 8590 llvm::raw_string_ostream OS(Str); 8591 OS << getState(); 8592 return OS.str(); 8593 } 8594 8595 /// See AbstractAttribute::updateImpl(...). 
8596 ChangeStatus updateImpl(Attributor &A) override { 8597 return indicatePessimisticFixpoint(); 8598 } 8599 }; 8600 8601 struct AAPotentialValuesArgument final 8602 : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8603 PotentialConstantIntValuesState> { 8604 using Base = 8605 AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8606 PotentialConstantIntValuesState>; 8607 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A) 8608 : Base(IRP, A) {} 8609 8610 /// See AbstractAttribute::initialize(..). 8611 void initialize(Attributor &A) override { 8612 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8613 indicatePessimisticFixpoint(); 8614 } else { 8615 Base::initialize(A); 8616 } 8617 } 8618 8619 /// See AbstractAttribute::trackStatistics() 8620 void trackStatistics() const override { 8621 STATS_DECLTRACK_ARG_ATTR(potential_values) 8622 } 8623 }; 8624 8625 struct AAPotentialValuesReturned 8626 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { 8627 using Base = 8628 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; 8629 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) 8630 : Base(IRP, A) {} 8631 8632 /// See AbstractAttribute::trackStatistics() 8633 void trackStatistics() const override { 8634 STATS_DECLTRACK_FNRET_ATTR(potential_values) 8635 } 8636 }; 8637 8638 struct AAPotentialValuesFloating : AAPotentialValuesImpl { 8639 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) 8640 : AAPotentialValuesImpl(IRP, A) {} 8641 8642 /// See AbstractAttribute::initialize(..). 8643 void initialize(Attributor &A) override { 8644 AAPotentialValuesImpl::initialize(A); 8645 if (isAtFixpoint()) 8646 return; 8647 8648 Value &V = getAssociatedValue(); 8649 8650 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8651 unionAssumed(C->getValue()); 8652 indicateOptimisticFixpoint(); 8653 return; 8654 } 8655 8656 if (isa<UndefValue>(&V)) { 8657 unionAssumedWithUndef(); 8658 indicateOptimisticFixpoint(); 8659 return; 8660 } 8661 8662 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) 8663 return; 8664 8665 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V)) 8666 return; 8667 8668 indicatePessimisticFixpoint(); 8669 8670 LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: " 8671 << getAssociatedValue() << "\n"); 8672 } 8673 8674 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, 8675 const APInt &RHS) { 8676 return ICmpInst::compare(LHS, RHS, ICI->getPredicate()); 8677 } 8678 8679 static APInt calculateCastInst(const CastInst *CI, const APInt &Src, 8680 uint32_t ResultBitWidth) { 8681 Instruction::CastOps CastOp = CI->getOpcode(); 8682 switch (CastOp) { 8683 default: 8684 llvm_unreachable("unsupported or not integer cast"); 8685 case Instruction::Trunc: 8686 return Src.trunc(ResultBitWidth); 8687 case Instruction::SExt: 8688 return Src.sext(ResultBitWidth); 8689 case Instruction::ZExt: 8690 return Src.zext(ResultBitWidth); 8691 case Instruction::BitCast: 8692 return Src; 8693 } 8694 } 8695 8696 static APInt calculateBinaryOperator(const BinaryOperator *BinOp, 8697 const APInt &LHS, const APInt &RHS, 8698 bool &SkipOperation, bool &Unsupported) { 8699 Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); 8700 // Unsupported is set to true when the binary operator is not supported. 8701 // SkipOperation is set to true when UB occur with the given operand pair 8702 // (LHS, RHS). 
8703 // TODO: we should look at nsw and nuw keywords to handle operations 8704 // that create poison or undef value. 8705 switch (BinOpcode) { 8706 default: 8707 Unsupported = true; 8708 return LHS; 8709 case Instruction::Add: 8710 return LHS + RHS; 8711 case Instruction::Sub: 8712 return LHS - RHS; 8713 case Instruction::Mul: 8714 return LHS * RHS; 8715 case Instruction::UDiv: 8716 if (RHS.isZero()) { 8717 SkipOperation = true; 8718 return LHS; 8719 } 8720 return LHS.udiv(RHS); 8721 case Instruction::SDiv: 8722 if (RHS.isZero()) { 8723 SkipOperation = true; 8724 return LHS; 8725 } 8726 return LHS.sdiv(RHS); 8727 case Instruction::URem: 8728 if (RHS.isZero()) { 8729 SkipOperation = true; 8730 return LHS; 8731 } 8732 return LHS.urem(RHS); 8733 case Instruction::SRem: 8734 if (RHS.isZero()) { 8735 SkipOperation = true; 8736 return LHS; 8737 } 8738 return LHS.srem(RHS); 8739 case Instruction::Shl: 8740 return LHS.shl(RHS); 8741 case Instruction::LShr: 8742 return LHS.lshr(RHS); 8743 case Instruction::AShr: 8744 return LHS.ashr(RHS); 8745 case Instruction::And: 8746 return LHS & RHS; 8747 case Instruction::Or: 8748 return LHS | RHS; 8749 case Instruction::Xor: 8750 return LHS ^ RHS; 8751 } 8752 } 8753 8754 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8755 const APInt &LHS, const APInt &RHS) { 8756 bool SkipOperation = false; 8757 bool Unsupported = false; 8758 APInt Result = 8759 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8760 if (Unsupported) 8761 return false; 8762 // If SkipOperation is true, we can ignore this operand pair (L, R). 8763 if (!SkipOperation) 8764 unionAssumed(Result); 8765 return isValidState(); 8766 } 8767 8768 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8769 auto AssumedBefore = getAssumed(); 8770 Value *LHS = ICI->getOperand(0); 8771 Value *RHS = ICI->getOperand(1); 8772 8773 // Simplify the operands first. 8774 bool UsedAssumedInformation = false; 8775 const auto &SimplifiedLHS = 8776 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8777 *this, UsedAssumedInformation); 8778 if (!SimplifiedLHS.hasValue()) 8779 return ChangeStatus::UNCHANGED; 8780 if (!SimplifiedLHS.getValue()) 8781 return indicatePessimisticFixpoint(); 8782 LHS = *SimplifiedLHS; 8783 8784 const auto &SimplifiedRHS = 8785 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8786 *this, UsedAssumedInformation); 8787 if (!SimplifiedRHS.hasValue()) 8788 return ChangeStatus::UNCHANGED; 8789 if (!SimplifiedRHS.getValue()) 8790 return indicatePessimisticFixpoint(); 8791 RHS = *SimplifiedRHS; 8792 8793 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8794 return indicatePessimisticFixpoint(); 8795 8796 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8797 DepClassTy::REQUIRED); 8798 if (!LHSAA.isValidState()) 8799 return indicatePessimisticFixpoint(); 8800 8801 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8802 DepClassTy::REQUIRED); 8803 if (!RHSAA.isValidState()) 8804 return indicatePessimisticFixpoint(); 8805 8806 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 8807 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 8808 8809 // TODO: make use of undef flag to limit potential values aggressively. 
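    // Worked example: for an `icmp eq` with LHS set {0} and RHS set {1},
    // every pair compares unequal, so only the 1-bit value 0 is unioned in
    // below. If both outcomes were possible, the loops would bail out via
    // indicatePessimisticFixpoint().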
8810 bool MaybeTrue = false, MaybeFalse = false; 8811 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8812 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 8813 // The result of any comparison between undefs can be soundly replaced 8814 // with undef. 8815 unionAssumedWithUndef(); 8816 } else if (LHSAA.undefIsContained()) { 8817 for (const APInt &R : RHSAAPVS) { 8818 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8819 MaybeTrue |= CmpResult; 8820 MaybeFalse |= !CmpResult; 8821 if (MaybeTrue & MaybeFalse) 8822 return indicatePessimisticFixpoint(); 8823 } 8824 } else if (RHSAA.undefIsContained()) { 8825 for (const APInt &L : LHSAAPVS) { 8826 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8827 MaybeTrue |= CmpResult; 8828 MaybeFalse |= !CmpResult; 8829 if (MaybeTrue & MaybeFalse) 8830 return indicatePessimisticFixpoint(); 8831 } 8832 } else { 8833 for (const APInt &L : LHSAAPVS) { 8834 for (const APInt &R : RHSAAPVS) { 8835 bool CmpResult = calculateICmpInst(ICI, L, R); 8836 MaybeTrue |= CmpResult; 8837 MaybeFalse |= !CmpResult; 8838 if (MaybeTrue & MaybeFalse) 8839 return indicatePessimisticFixpoint(); 8840 } 8841 } 8842 } 8843 if (MaybeTrue) 8844 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8845 if (MaybeFalse) 8846 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8847 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8848 : ChangeStatus::CHANGED; 8849 } 8850 8851 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8852 auto AssumedBefore = getAssumed(); 8853 Value *LHS = SI->getTrueValue(); 8854 Value *RHS = SI->getFalseValue(); 8855 8856 // Simplify the operands first. 8857 bool UsedAssumedInformation = false; 8858 const auto &SimplifiedLHS = 8859 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8860 *this, UsedAssumedInformation); 8861 if (!SimplifiedLHS.hasValue()) 8862 return ChangeStatus::UNCHANGED; 8863 if (!SimplifiedLHS.getValue()) 8864 return indicatePessimisticFixpoint(); 8865 LHS = *SimplifiedLHS; 8866 8867 const auto &SimplifiedRHS = 8868 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8869 *this, UsedAssumedInformation); 8870 if (!SimplifiedRHS.hasValue()) 8871 return ChangeStatus::UNCHANGED; 8872 if (!SimplifiedRHS.getValue()) 8873 return indicatePessimisticFixpoint(); 8874 RHS = *SimplifiedRHS; 8875 8876 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8877 return indicatePessimisticFixpoint(); 8878 8879 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 8880 UsedAssumedInformation); 8881 8882 // Check if we only need one operand. 8883 bool OnlyLeft = false, OnlyRight = false; 8884 if (C.hasValue() && *C && (*C)->isOneValue()) 8885 OnlyLeft = true; 8886 else if (C.hasValue() && *C && (*C)->isZeroValue()) 8887 OnlyRight = true; 8888 8889 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 8890 if (!OnlyRight) { 8891 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8892 DepClassTy::REQUIRED); 8893 if (!LHSAA->isValidState()) 8894 return indicatePessimisticFixpoint(); 8895 } 8896 if (!OnlyLeft) { 8897 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8898 DepClassTy::REQUIRED); 8899 if (!RHSAA->isValidState()) 8900 return indicatePessimisticFixpoint(); 8901 } 8902 8903 if (!LHSAA || !RHSAA) { 8904 // select (true/false), lhs, rhs 8905 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 8906 8907 if (OpAA->undefIsContained()) 8908 unionAssumedWithUndef(); 8909 else 8910 unionAssumed(*OpAA); 8911 8912 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 8913 // select i1 *, undef , undef => undef 8914 unionAssumedWithUndef(); 8915 } else { 8916 unionAssumed(*LHSAA); 8917 unionAssumed(*RHSAA); 8918 } 8919 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8920 : ChangeStatus::CHANGED; 8921 } 8922 8923 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 8924 auto AssumedBefore = getAssumed(); 8925 if (!CI->isIntegerCast()) 8926 return indicatePessimisticFixpoint(); 8927 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 8928 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 8929 Value *Src = CI->getOperand(0); 8930 8931 // Simplify the operand first. 8932 bool UsedAssumedInformation = false; 8933 const auto &SimplifiedSrc = 8934 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 8935 *this, UsedAssumedInformation); 8936 if (!SimplifiedSrc.hasValue()) 8937 return ChangeStatus::UNCHANGED; 8938 if (!SimplifiedSrc.getValue()) 8939 return indicatePessimisticFixpoint(); 8940 Src = *SimplifiedSrc; 8941 8942 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 8943 DepClassTy::REQUIRED); 8944 if (!SrcAA.isValidState()) 8945 return indicatePessimisticFixpoint(); 8946 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 8947 if (SrcAA.undefIsContained()) 8948 unionAssumedWithUndef(); 8949 else { 8950 for (const APInt &S : SrcAAPVS) { 8951 APInt T = calculateCastInst(CI, S, ResultBitWidth); 8952 unionAssumed(T); 8953 } 8954 } 8955 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8956 : ChangeStatus::CHANGED; 8957 } 8958 8959 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 8960 auto AssumedBefore = getAssumed(); 8961 Value *LHS = BinOp->getOperand(0); 8962 Value *RHS = BinOp->getOperand(1); 8963 8964 // Simplify the operands first. 8965 bool UsedAssumedInformation = false; 8966 const auto &SimplifiedLHS = 8967 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8968 *this, UsedAssumedInformation); 8969 if (!SimplifiedLHS.hasValue()) 8970 return ChangeStatus::UNCHANGED; 8971 if (!SimplifiedLHS.getValue()) 8972 return indicatePessimisticFixpoint(); 8973 LHS = *SimplifiedLHS; 8974 8975 const auto &SimplifiedRHS = 8976 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8977 *this, UsedAssumedInformation); 8978 if (!SimplifiedRHS.hasValue()) 8979 return ChangeStatus::UNCHANGED; 8980 if (!SimplifiedRHS.getValue()) 8981 return indicatePessimisticFixpoint(); 8982 RHS = *SimplifiedRHS; 8983 8984 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8985 return indicatePessimisticFixpoint(); 8986 8987 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8988 DepClassTy::REQUIRED); 8989 if (!LHSAA.isValidState()) 8990 return indicatePessimisticFixpoint(); 8991 8992 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8993 DepClassTy::REQUIRED); 8994 if (!RHSAA.isValidState()) 8995 return indicatePessimisticFixpoint(); 8996 8997 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 8998 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 8999 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9000 9001 // TODO: make use of undef flag to limit potential values aggressively. 
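    // Worked example: for an `add` with LHS set {1, 2} and RHS set {8}, the
    // loops below union {9, 10} into the assumed set; a contained undef
    // participates as the Zero value defined above.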
9002 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9003 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9004 return indicatePessimisticFixpoint(); 9005 } else if (LHSAA.undefIsContained()) { 9006 for (const APInt &R : RHSAAPVS) { 9007 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9008 return indicatePessimisticFixpoint(); 9009 } 9010 } else if (RHSAA.undefIsContained()) { 9011 for (const APInt &L : LHSAAPVS) { 9012 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9013 return indicatePessimisticFixpoint(); 9014 } 9015 } else { 9016 for (const APInt &L : LHSAAPVS) { 9017 for (const APInt &R : RHSAAPVS) { 9018 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9019 return indicatePessimisticFixpoint(); 9020 } 9021 } 9022 } 9023 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9024 : ChangeStatus::CHANGED; 9025 } 9026 9027 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9028 auto AssumedBefore = getAssumed(); 9029 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9030 Value *IncomingValue = PHI->getIncomingValue(u); 9031 9032 // Simplify the operand first. 9033 bool UsedAssumedInformation = false; 9034 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9035 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9036 UsedAssumedInformation); 9037 if (!SimplifiedIncomingValue.hasValue()) 9038 continue; 9039 if (!SimplifiedIncomingValue.getValue()) 9040 return indicatePessimisticFixpoint(); 9041 IncomingValue = *SimplifiedIncomingValue; 9042 9043 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9044 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9045 if (!PotentialValuesAA.isValidState()) 9046 return indicatePessimisticFixpoint(); 9047 if (PotentialValuesAA.undefIsContained()) 9048 unionAssumedWithUndef(); 9049 else 9050 unionAssumed(PotentialValuesAA.getAssumed()); 9051 } 9052 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9053 : ChangeStatus::CHANGED; 9054 } 9055 9056 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9057 if (!L.getType()->isIntegerTy()) 9058 return indicatePessimisticFixpoint(); 9059 9060 auto Union = [&](Value &V) { 9061 if (isa<UndefValue>(V)) { 9062 unionAssumedWithUndef(); 9063 return true; 9064 } 9065 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9066 unionAssumed(CI->getValue()); 9067 return true; 9068 } 9069 return false; 9070 }; 9071 auto AssumedBefore = getAssumed(); 9072 9073 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9074 return indicatePessimisticFixpoint(); 9075 9076 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9077 : ChangeStatus::CHANGED; 9078 } 9079 9080 /// See AbstractAttribute::updateImpl(...). 
9081 ChangeStatus updateImpl(Attributor &A) override { 9082 Value &V = getAssociatedValue(); 9083 Instruction *I = dyn_cast<Instruction>(&V); 9084 9085 if (auto *ICI = dyn_cast<ICmpInst>(I)) 9086 return updateWithICmpInst(A, ICI); 9087 9088 if (auto *SI = dyn_cast<SelectInst>(I)) 9089 return updateWithSelectInst(A, SI); 9090 9091 if (auto *CI = dyn_cast<CastInst>(I)) 9092 return updateWithCastInst(A, CI); 9093 9094 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 9095 return updateWithBinaryOperator(A, BinOp); 9096 9097 if (auto *PHI = dyn_cast<PHINode>(I)) 9098 return updateWithPHINode(A, PHI); 9099 9100 if (auto *L = dyn_cast<LoadInst>(I)) 9101 return updateWithLoad(A, *L); 9102 9103 return indicatePessimisticFixpoint(); 9104 } 9105 9106 /// See AbstractAttribute::trackStatistics() 9107 void trackStatistics() const override { 9108 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 9109 } 9110 }; 9111 9112 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 9113 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 9114 : AAPotentialValuesImpl(IRP, A) {} 9115 9116 /// See AbstractAttribute::initialize(...). 9117 ChangeStatus updateImpl(Attributor &A) override { 9118 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 9119 "not be called"); 9120 } 9121 9122 /// See AbstractAttribute::trackStatistics() 9123 void trackStatistics() const override { 9124 STATS_DECLTRACK_FN_ATTR(potential_values) 9125 } 9126 }; 9127 9128 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 9129 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 9130 : AAPotentialValuesFunction(IRP, A) {} 9131 9132 /// See AbstractAttribute::trackStatistics() 9133 void trackStatistics() const override { 9134 STATS_DECLTRACK_CS_ATTR(potential_values) 9135 } 9136 }; 9137 9138 struct AAPotentialValuesCallSiteReturned 9139 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> { 9140 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 9141 : AACallSiteReturnedFromReturned<AAPotentialValues, 9142 AAPotentialValuesImpl>(IRP, A) {} 9143 9144 /// See AbstractAttribute::trackStatistics() 9145 void trackStatistics() const override { 9146 STATS_DECLTRACK_CSRET_ATTR(potential_values) 9147 } 9148 }; 9149 9150 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { 9151 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) 9152 : AAPotentialValuesFloating(IRP, A) {} 9153 9154 /// See AbstractAttribute::initialize(..). 9155 void initialize(Attributor &A) override { 9156 AAPotentialValuesImpl::initialize(A); 9157 if (isAtFixpoint()) 9158 return; 9159 9160 Value &V = getAssociatedValue(); 9161 9162 if (auto *C = dyn_cast<ConstantInt>(&V)) { 9163 unionAssumed(C->getValue()); 9164 indicateOptimisticFixpoint(); 9165 return; 9166 } 9167 9168 if (isa<UndefValue>(&V)) { 9169 unionAssumedWithUndef(); 9170 indicateOptimisticFixpoint(); 9171 return; 9172 } 9173 } 9174 9175 /// See AbstractAttribute::updateImpl(...). 9176 ChangeStatus updateImpl(Attributor &A) override { 9177 Value &V = getAssociatedValue(); 9178 auto AssumedBefore = getAssumed(); 9179 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V), 9180 DepClassTy::REQUIRED); 9181 const auto &S = AA.getAssumed(); 9182 unionAssumed(S); 9183 return AssumedBefore == getAssumed() ? 
ChangeStatus::UNCHANGED 9184 : ChangeStatus::CHANGED; 9185 } 9186 9187 /// See AbstractAttribute::trackStatistics() 9188 void trackStatistics() const override { 9189 STATS_DECLTRACK_CSARG_ATTR(potential_values) 9190 } 9191 }; 9192 9193 /// ------------------------ NoUndef Attribute --------------------------------- 9194 struct AANoUndefImpl : AANoUndef { 9195 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} 9196 9197 /// See AbstractAttribute::initialize(...). 9198 void initialize(Attributor &A) override { 9199 if (getIRPosition().hasAttr({Attribute::NoUndef})) { 9200 indicateOptimisticFixpoint(); 9201 return; 9202 } 9203 Value &V = getAssociatedValue(); 9204 if (isa<UndefValue>(V)) 9205 indicatePessimisticFixpoint(); 9206 else if (isa<FreezeInst>(V)) 9207 indicateOptimisticFixpoint(); 9208 else if (getPositionKind() != IRPosition::IRP_RETURNED && 9209 isGuaranteedNotToBeUndefOrPoison(&V)) 9210 indicateOptimisticFixpoint(); 9211 else 9212 AANoUndef::initialize(A); 9213 } 9214 9215 /// See followUsesInMBEC 9216 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 9217 AANoUndef::StateType &State) { 9218 const Value *UseV = U->get(); 9219 const DominatorTree *DT = nullptr; 9220 AssumptionCache *AC = nullptr; 9221 InformationCache &InfoCache = A.getInfoCache(); 9222 if (Function *F = getAnchorScope()) { 9223 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 9224 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 9225 } 9226 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); 9227 bool TrackUse = false; 9228 // Track use for instructions which must produce undef or poison bits when 9229 // at least one operand contains such bits. 9230 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) 9231 TrackUse = true; 9232 return TrackUse; 9233 } 9234 9235 /// See AbstractAttribute::getAsStr(). 9236 const std::string getAsStr() const override { 9237 return getAssumed() ? "noundef" : "may-undef-or-poison"; 9238 } 9239 9240 ChangeStatus manifest(Attributor &A) override { 9241 // We don't manifest noundef attribute for dead positions because the 9242 // associated values with dead positions would be replaced with undef 9243 // values. 9244 bool UsedAssumedInformation = false; 9245 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, 9246 UsedAssumedInformation)) 9247 return ChangeStatus::UNCHANGED; 9248 // A position whose simplified value does not have any value is 9249 // considered to be dead. We don't manifest noundef in such positions for 9250 // the same reason above. 9251 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation) 9252 .hasValue()) 9253 return ChangeStatus::UNCHANGED; 9254 return AANoUndef::manifest(A); 9255 } 9256 }; 9257 9258 struct AANoUndefFloating : public AANoUndefImpl { 9259 AANoUndefFloating(const IRPosition &IRP, Attributor &A) 9260 : AANoUndefImpl(IRP, A) {} 9261 9262 /// See AbstractAttribute::initialize(...). 9263 void initialize(Attributor &A) override { 9264 AANoUndefImpl::initialize(A); 9265 if (!getState().isAtFixpoint()) 9266 if (Instruction *CtxI = getCtxI()) 9267 followUsesInMBEC(*this, A, getState(), *CtxI); 9268 } 9269 9270 /// See AbstractAttribute::updateImpl(...). 
9271 ChangeStatus updateImpl(Attributor &A) override { 9272 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, 9273 AANoUndef::StateType &T, bool Stripped) -> bool { 9274 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), 9275 DepClassTy::REQUIRED); 9276 if (!Stripped && this == &AA) { 9277 T.indicatePessimisticFixpoint(); 9278 } else { 9279 const AANoUndef::StateType &S = 9280 static_cast<const AANoUndef::StateType &>(AA.getState()); 9281 T ^= S; 9282 } 9283 return T.isValidState(); 9284 }; 9285 9286 StateType T; 9287 if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T, 9288 VisitValueCB, getCtxI())) 9289 return indicatePessimisticFixpoint(); 9290 9291 return clampStateAndIndicateChange(getState(), T); 9292 } 9293 9294 /// See AbstractAttribute::trackStatistics() 9295 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9296 }; 9297 9298 struct AANoUndefReturned final 9299 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { 9300 AANoUndefReturned(const IRPosition &IRP, Attributor &A) 9301 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} 9302 9303 /// See AbstractAttribute::trackStatistics() 9304 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9305 }; 9306 9307 struct AANoUndefArgument final 9308 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { 9309 AANoUndefArgument(const IRPosition &IRP, Attributor &A) 9310 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} 9311 9312 /// See AbstractAttribute::trackStatistics() 9313 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } 9314 }; 9315 9316 struct AANoUndefCallSiteArgument final : AANoUndefFloating { 9317 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) 9318 : AANoUndefFloating(IRP, A) {} 9319 9320 /// See AbstractAttribute::trackStatistics() 9321 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } 9322 }; 9323 9324 struct AANoUndefCallSiteReturned final 9325 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> { 9326 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) 9327 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {} 9328 9329 /// See AbstractAttribute::trackStatistics() 9330 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } 9331 }; 9332 9333 struct AACallEdgesImpl : public AACallEdges { 9334 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} 9335 9336 virtual const SetVector<Function *> &getOptimisticEdges() const override { 9337 return CalledFunctions; 9338 } 9339 9340 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; } 9341 9342 virtual bool hasNonAsmUnknownCallee() const override { 9343 return HasUnknownCalleeNonAsm; 9344 } 9345 9346 const std::string getAsStr() const override { 9347 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + 9348 std::to_string(CalledFunctions.size()) + "]"; 9349 } 9350 9351 void trackStatistics() const override {} 9352 9353 protected: 9354 void addCalledFunction(Function *Fn, ChangeStatus &Change) { 9355 if (CalledFunctions.insert(Fn)) { 9356 Change = ChangeStatus::CHANGED; 9357 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() 9358 << "\n"); 9359 } 9360 } 9361 9362 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { 9363 if (!HasUnknownCallee) 9364 Change = ChangeStatus::CHANGED; 9365 if (NonAsm && !HasUnknownCalleeNonAsm) 9366 Change 
struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  virtual const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }

  virtual bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       false)) {
        // If we haven't gone through all values, assume that there are unknown
        // callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = static_cast<CallBase *>(getCtxI());

    if (CB->isInlineAsm()) {
      setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: process the called operand directly.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};

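// For illustration only (hypothetical IR): the `!callees` metadata consulted
// above restricts an indirect call to a known set of targets, e.g.
//
//   call void %fptr(i32 %v), !callees !0
//   ...
//   !0 = !{void (i32)* @impl_a, void (i32)* @impl_b}
//
// in which case only @impl_a and @impl_b become optimistic call edges and no
// unknown callee has to be assumed for this call site.
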
struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = static_cast<CallBase &>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      // Copy the unknown-callee information from the call site, preserving
      // whether it stems from inline asm or not.
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation)) {
      // If we haven't looked at all call-like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

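// For illustration only (hypothetical IR): for
//
//   define void @f() {
//     call void @g()
//     call void asm sideeffect "nop", ""()
//     ret void
//   }
//
// the aggregated function-level state is CalledFunctions = {@g},
// HasUnknownCallee = true (due to the inline asm), and
// HasUnknownCalleeNonAsm = false, so clients that only care about real
// (non-asm) callees may still treat the edge set as complete.
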
struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(Function *Fn) {
      Reachable.insert(Fn);
      Unreachable.erase(Fn);
    }

    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, const AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(Fn))
        return true;

      if (Unreachable.count(Fn))
        return false;

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          Function *Fn) const {

      // Handle the most trivial case first.
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(Fn))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now, set dependencies and leave.
      for (const auto *Dep : Deps)
        A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);

      return false;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we assume
    /// that we can reach any function.
    bool CanReachUnknownCallee = false;
  };

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, Function *Fn) const override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result =
        NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QuerySet &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    // Iterate by reference: the per-call-site QuerySets are updated in place.
    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QuerySet WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
  DenseMap<CallBase *, QuerySet> CBQueries;
};

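// For illustration only (hypothetical call graph): assume @a calls @b and @b
// calls @a, and we query whether @a can reach @c. @a's QuerySet tentatively
// records @c as unreachable before walking its edges; when the nested query
// through @b arrives back at @a asking about @c, it hits that tentative entry
// and returns false instead of recursing forever. @c is moved to the
// Reachable set only once some optimistic call edge actually leads to it.
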
/// ---------------------- Assumption Propagation ------------------------------
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};

/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph, or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool AllCallSitesKnown;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either an
    // entry into the call graph or an empty node. This node is known to only
    // contain its own assumptions and can be propagated to its successors.
    if (!A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};

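// For illustration only (hypothetical functions; the assumption strings are
// taken from the known OpenMP set): if @h is called from @f through a call
// site whose assumed set is {"omp_no_openmp"} and from @g through a call site
// whose assumed set is {"omp_no_openmp", "omp_no_parallelism"}, the update
// above intersects the caller states so @h keeps exactly {"omp_no_openmp"}.
// If not all call sites are known, the pessimistic fixpoint drops the set
// entirely instead.
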
/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};

} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
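//
// For illustration, the first instantiation below,
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind), expands
// (roughly) to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     default: // All other position kinds, each via SWITCH_PK_INV.
//       llvm_unreachable("Cannot create AANoUnwind for a ... position!");
//     }
//     return *AA;
//   }
//
// The remaining CREATE_* variants differ only in which position kinds are
// valid for the attribute in question.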

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {     \
    CLASS *AA = nullptr;                                                      \
    switch (IRP.getPositionKind()) {                                          \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                            \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                          \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                          \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")      \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")      \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                   \
    }                                                                         \
    return *AA;                                                               \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                   \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {     \
    CLASS *AA = nullptr;                                                      \
    switch (IRP.getPositionKind()) {                                          \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                            \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                          \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)  \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)  \
    }                                                                         \
    return *AA;                                                               \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                     \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {     \
    CLASS *AA = nullptr;                                                      \
    switch (IRP.getPositionKind()) {                                          \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                            \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)  \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)  \
    }                                                                         \
    return *AA;                                                               \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)           \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {     \
    CLASS *AA = nullptr;                                                      \
    switch (IRP.getPositionKind()) {                                          \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                            \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                          \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                          \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")      \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")      \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                    \
    }                                                                         \
    return *AA;                                                               \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {     \
    CLASS *AA = nullptr;                                                      \
    switch (IRP.getPositionKind()) {                                          \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                            \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                          \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)  \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)  \
    }                                                                         \
    return *AA;                                                               \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV