//===- AttributorAttributes.cpp - Attributes for Attributor deduction ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
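//
// E.g., with multiple increment sites (an illustrative sketch; `foo` is a
// made-up attribute name, not one deduced in this file):
//   STATS_DECL(foo, Function, BUILD_STAT_MSG_IR_ATTR(functions, foo))
//   ...
//   STATS_TRACK(foo, Function) // at each site that increments the counter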
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
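///
/// For example, for `%v = load i32, i32* %p` this returns `%p`, while for a
/// volatile load or store it returns nullptr unless \p AllowVolatile is set.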
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
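///
/// For example (illustrative), for
///   %sel = select i1 %c, i8* %a, i8* %b
/// both %a and %b are visited if %c cannot be simplified to a constant, and
/// only the selected operand is visited if it can.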
template <typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
    StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA,
        IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
        DepClassTy::NONE);
  bool AnyDead = false;

  Value *InitialV = &IRP.getAssociatedValue();
  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({InitialV, CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit assumed potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C = A.getAssumedConstant(
          *SI->getCondition(), QueryingAA, UsedAssumedInformation);
      bool NoValueYet = !C.hasValue();
      if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
        continue;
      if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
        if (CI->isZero())
          Worklist.push_back({SI->getFalseValue(), CtxI});
        else
          Worklist.push_back({SI->getTrueValue(), CtxI});
        continue;
      }
      // We could not simplify the condition, assume both values.
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
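    // E.g. (illustrative): for `%p = phi i8* [ %a, %bb0 ], [ %b, %bb1 ]`
    // where the terminator of %bb1 is assumed dead, only %a is enqueued
    // below; the skipped edge is remembered via AnyDead.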
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        bool UsedAssumedInformation = false;
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA, UsedAssumedInformation,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Value *> SimpleV =
          A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
      if (!SimpleV.hasValue())
        continue;
      if (!SimpleV.getValue())
        return false;
      Value *NewV = SimpleV.getValue();
      if (NewV != V) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallVectorImpl<Value *> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI) {
  auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
  SmallPtrSet<Value *, 8> SeenObjects;
  auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
                                     SmallVectorImpl<Value *> &Objects,
                                     bool) -> bool {
    if (SeenObjects.insert(&Val).second)
      Objects.push_back(&Val);
    return true;
  };
  return genericValueTraversal<decltype(Objects)>(
      A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI, true,
      32, StripCB);
}

static const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
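    // E.g. (illustrative): for an index with known range [4, 32) we only
    // accumulate 4 bytes here, since the actual value could be as small as 4.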
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ false,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccessPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

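  /// E.g. (illustrative), instantiated with AAType = AANonNull this clamps
  /// the AANonNull states of all potentially returned values into the
  /// function's returned position.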
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all that are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call base context to argument. "
                    << "Position: " << Pos
                    << ", CB arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};
} // namespace

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  // ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //   if (a) {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   } else {
  //     if (b) {
  //       *ptr = 0;
  //     } else {
  //       *ptr = 1;
  //     }
  //   }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent is a conjunction of the children's
    // known states, so it is initialized with the best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

/// An access offset and size description as used by AAPointerInfo.
struct OffsetAndSize;

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AA::PointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
  using BaseTy = std::pair<int64_t, int64_t>;
  OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
  OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
  int64_t getOffset() const { return first; }
  int64_t getSize() const { return second; }
  static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p OAS.
  bool mayOverlap(const OffsetAndSize &OAS) const {
    // Any unknown value and we are giving up -> overlap.
    if (OAS.getOffset() == OffsetAndSize::Unknown ||
        OAS.getSize() == OffsetAndSize::Unknown ||
        getOffset() == OffsetAndSize::Unknown ||
        getSize() == OffsetAndSize::Unknown)
      return true;

    // Check if one offset point is in the other interval [offset, offset+size).
    return OAS.getOffset() + OAS.getSize() > getOffset() &&
           OAS.getOffset() < getOffset() + getSize();
  }

  /// Constant used to represent unknown offset or sizes.
  static constexpr int64_t Unknown = 1 << 31;
};

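// Illustrative example (not from the source): the bins {Offset=0, Size=8} and
// {Offset=4, Size=4} may overlap (4 + 4 > 0 and 4 < 0 + 8), while
// {Offset=0, Size=4} and {Offset=4, Size=4} do not (4 is not < 0 + 4). Any
// unknown offset or size conservatively overlaps with everything.
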
/// Implementation of the DenseMapInfo.
///
///{
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getEmptyKey() {
  return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
}
inline llvm::AccessAsInstructionInfo::Access
llvm::AccessAsInstructionInfo::getTombstoneKey() {
  return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
                nullptr);
}
unsigned llvm::AccessAsInstructionInfo::getHashValue(
    const llvm::AccessAsInstructionInfo::Access &A) {
  return Base::getHashValue(A.getRemoteInst());
}
bool llvm::AccessAsInstructionInfo::isEqual(
    const llvm::AccessAsInstructionInfo::Access &LHS,
    const llvm::AccessAsInstructionInfo::Access &RHS) {
  return LHS.getRemoteInst() == RHS.getRemoteInst();
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
                               nullptr);
}
inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
  return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
                               nullptr);
}

unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
  return detail::combineHashValue(
             DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
             (A.isWrittenValueYetUndetermined()
                  ? ~0
                  : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
         A.getKind();
}

bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
    const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
  return LHS == RHS;
}
///}

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() {}
  State(const State &SIS) : AccessBins(SIS.AccessBins) {}
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs.size() != RAccs.size())
        return false;
      auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
      while (AccIt != AccE) {
        if (*AccIt != *RAccIt)
          return false;
        ++AccIt;
        ++RAccIt;
      }
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  DenseMap<OffsetAndSize, Accesses> AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, and UNCHANGED otherwise.
  ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
                         Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    OffsetAndSize Key{Offset, Size};
    Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin.find(Acc);
    if (It == Bin.end()) {
      Bin.insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access Before = *It;
    // The new one will be combined with the existing one.
    *It &= Acc;
    return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;
    // First find the offset and size of I.
    OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    for (auto &It : AccessBins) {
      OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      for (auto &Access : It.getSecond())
        if (!CB(Access, OAS == ItOAS))
          return false;
    }
    return true;
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      LoadInst &LI,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(LI, CB);
  }
  bool forallInterferingAccesses(
      StoreInst &SI,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(SI, CB);
  }

  ChangeStatus translateAndAddCalleeState(Attributor &A,
                                          const AAPointerInfo &CalleeAA,
                                          int64_t CallArgOffset, CallBase &CB) {
    using namespace AA::PointerInfo;
    if (!CalleeAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
    bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : CalleeImplAA.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (CallArgOffset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
                            It.first.getSize());
      Accesses &Bin = AccessBins[OAS];
      for (const AAPointerInfo::Access &RAcc : It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.translateArgumentToCallSiteContent(
            RAcc.getContent(), CB, *this, UsedAssumedInformation);
        AccessKind AK =
            AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
                                                 : AccessKind::AK_READ_WRITE));
        Changed =
            Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
                                RAcc.getType(), RAcc.getRemoteInst(), &Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    State S = getState();
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");

      OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        if (CE->isCompare())
          return true;
        if (!CE->isGEPWithNoNotionalOverIndexing()) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        UsrOI = PtrOI;

        // TODO: Use range information.
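        // E.g. (illustrative): for an all-constant-index GEP such as
        //   %f = getelementptr { i32, i32 }, { i32, i32 }* %s, i32 0, i32 1
        // the user offset below becomes PtrOI.Offset + 4 under a typical
        // DataLayout; any non-constant index forces the unknown offset.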
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
        UsrOI.Offset = PtrOI.Offset +
                       DL.getIndexedOffsetInType(
                           CurPtr->getType()->getPointerElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
        return HandlePassthroughUser(Usr, PtrOI, Follow);

      // For PHIs we need to take care of the recurrence explicitly as the value
      // might change while we iterate through a loop. For now, we give up if
      // the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Check if the PHI is invariant (so far).
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
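        // E.g. (illustrative): for the loop recurrence
        //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
        //   %p.next = getelementptr i8, i8* %p, i64 1
        // the offset of %p differs per iteration, so it is treated as unknown
        // below.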
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, PtrOI.Offset, Changed,
                            LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content = A.getAssumedSimplified(
            *StoreI->getValueOperand(), *this, UsedAssumedInformation);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            PtrOI.Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses.
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU))
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           EquivalentUseCB))
      return indicatePessimisticFixpoint();

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      for (auto &It : AccessBins) {
        dbgs() << "[" << It.first.getOffset() << "-"
               << It.first.getOffset() + It.first.getSize()
               << "] : " << It.getSecond().size() << "\n";
        for (auto &Acc : It.getSecond()) {
          dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
                 << "\n";
          if (Acc.getLocalInst() != Acc.getRemoteInst())
            dbgs() << "     --> " << *Acc.getRemoteInst() << "\n";
          if (!Acc.isWrittenValueYetUndetermined())
            dbgs() << "       - " << Acc.getWrittenValue() << "\n";
        }
      }
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }
      return Changed;
    }

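    // E.g. (illustrative): for
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 8,
    //                                        i1 false)
    // the code above records a write of size 8 at offset 0 for %dst (ArgNo 0)
    // and a read of size 8 at offset 0 for %src (ArgNo 1).
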
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found.
  /// If there cannot be one, return a nullptr.
  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// llvm::None.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");
  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  }
  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;
  Type *Ty = getAssociatedFunction()->getReturnType();

  auto Pred = [&](Value &RV) -> bool {
    UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
    return UniqueRV != Optional<Value *>(nullptr);
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;
    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
                           bool) -> bool {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimpleRetVal =
        A.getAssumedSimplified(V, *this, UsedAssumedInformation);
    if (!SimpleRetVal.hasValue())
      return true;
    if (!SimpleRetVal.getValue())
      return false;
    Value *RetVal = *SimpleRetVal;
    assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
           "Assumed returned value should be valid in function scope!");
    if (ReturnedValues[RetVal].insert(&Ret))
      Changed = ChangeStatus::CHANGED;
    return true;
  };

  auto ReturnInstCB = [&](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    return genericValueTraversal<ReturnInst>(
        A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
        &I);
  };

  // Discover returned values from all live return instructions in the
  // associated function.
  bool UsedAssumedInformation = false;
  if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
                                 UsedAssumedInformation))
    return indicatePessimisticFixpoint();
  return Changed;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
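// Illustrative example (the IR below is not taken from this file): for a
// function such as
//
//   define i8* @passthrough(i8* %p) {
//     ret i8* %p
//   }
//
// the unique potential returned value is the argument %p, so the manifest
// step of AAReturnedValuesImpl above can annotate it:
//
//   define i8* @passthrough(i8* returned %p)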
/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, that is, whether its atomic ordering is stronger than unordered
  /// or monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function specific for intrinsics which are potentially volatile.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  if (auto *FI = dyn_cast<FenceInst>(I))
    // All legal orderings for fence are stronger than monotonic.
    return FI->getSyncScopeID() != SyncScope::SingleThread;
  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
    // Unordered is not a legal ordering for cmpxchg.
    return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
            AI->getFailureOrdering() != AtomicOrdering::Monotonic);
  }

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  return (Ordering != AtomicOrdering::Unordered &&
          Ordering != AtomicOrdering::Monotonic);
}

/// Return true if this intrinsic is nosync. This is only used for intrinsics
/// which would be nosync except that they have a volatile flag. All other
/// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return false;
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      if (isNoSyncIntrinsic(&I))
        return true;

      const auto &NoSyncAA = A.getAAFor<AANoSync>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
      return NoSyncAA.isAssumedNoSync();
    }

    if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
                                          UsedAssumedInformation) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
                                         UsedAssumedInformation))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};
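// Illustrative example (the IR below is not taken from this file): a function
// whose only memory effects are non-volatile, relaxed atomics can be deduced
// nosync, e.g.,
//
//   define void @f(i32* %p) {
//     store atomic i32 0, i32* %p monotonic, align 4
//     ret void
//   }
//
// whereas a volatile access or a stronger atomic ordering would defeat the
// deduction implemented above.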
/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA = A.getAAFor<AANoFree>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      return NoFreeAA.isAssumedNoFree();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};
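// Illustrative example (the IR below is not taken from this file): a function
// is nofree if none of its call-like instructions may free memory, e.g.,
//
//   declare void @g() nofree
//
//   define void @f() {
//     call void @g()  ; callee is nofree, so @f can be deduced nofree
//     ret void
//   }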
/// NoFree attribute deduction for a call site.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA = A.getAAFor<AANoFree>(
        *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo),
            DepClassTy::REQUIRED);
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
          isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
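// Illustrative example (the IR below is not taken from this file): for a
// floating pointer value %p, a user such as
//
//   call void @free(i8* %p)
//
// defeats nofree unless the callee's argument is known nofree, while
// value-preserving users (GEPs, bitcasts, PHIs, selects) are followed
// transitively by the use-walk above instead of giving up immediately.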
/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};

/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart to avoid looking through things we do
  // not like for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA =
        A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfAccessPointerOperand(A, QueryingAA, I, Offset, DL);
  if (Base) {
    if (Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  /// Corner case when an offset is 0.
  Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
                                              /*AllowNonInbounds*/ true);
  if (Base) {
    if (Offset == 0 && Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull, CanBeFreed;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
                                         CanBeFreed)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};
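// Illustrative example (the IR below is not taken from this file): a
// dereferencing use lets us infer nonnull for the pointer, e.g., in
//
//   define i32 @f(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }
//
// the load implies %p is nonnull (and dereferenceable) whenever null is not a
// defined address in %p's address space.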
/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
"norecurse" : "may-recurse"; 2331 } 2332 }; 2333 2334 struct AANoRecurseFunction final : AANoRecurseImpl { 2335 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 2336 : AANoRecurseImpl(IRP, A) {} 2337 2338 /// See AbstractAttribute::initialize(...). 2339 void initialize(Attributor &A) override { 2340 AANoRecurseImpl::initialize(A); 2341 // TODO: We should build a call graph ourselves to enable this in the module 2342 // pass as well. 2343 if (const Function *F = getAnchorScope()) 2344 if (A.getInfoCache().getSccSize(*F) != 1) 2345 indicatePessimisticFixpoint(); 2346 } 2347 2348 /// See AbstractAttribute::updateImpl(...). 2349 ChangeStatus updateImpl(Attributor &A) override { 2350 2351 // If all live call sites are known to be no-recurse, we are as well. 2352 auto CallSitePred = [&](AbstractCallSite ACS) { 2353 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2354 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2355 DepClassTy::NONE); 2356 return NoRecurseAA.isKnownNoRecurse(); 2357 }; 2358 bool AllCallSitesKnown; 2359 if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) { 2360 // If we know all call sites and all are known no-recurse, we are done. 2361 // If all known call sites, which might not be all that exist, are known 2362 // to be no-recurse, we are not done but we can continue to assume 2363 // no-recurse. If one of the call sites we have not visited will become 2364 // live, another update is triggered. 2365 if (AllCallSitesKnown) 2366 indicateOptimisticFixpoint(); 2367 return ChangeStatus::UNCHANGED; 2368 } 2369 2370 // If the above check does not hold anymore we look at the calls. 2371 auto CheckForNoRecurse = [&](Instruction &I) { 2372 const auto &CB = cast<CallBase>(I); 2373 if (CB.hasFnAttr(Attribute::NoRecurse)) 2374 return true; 2375 2376 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2377 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 2378 if (!NoRecurseAA.isAssumedNoRecurse()) 2379 return false; 2380 2381 // Recursion to the same function 2382 if (CB.getCalledFunction() == getAnchorScope()) 2383 return false; 2384 2385 return true; 2386 }; 2387 2388 bool UsedAssumedInformation = false; 2389 if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this, 2390 UsedAssumedInformation)) 2391 return indicatePessimisticFixpoint(); 2392 return ChangeStatus::UNCHANGED; 2393 } 2394 2395 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 2396 }; 2397 2398 /// NoRecurse attribute deduction for a call sites. 2399 struct AANoRecurseCallSite final : AANoRecurseImpl { 2400 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 2401 : AANoRecurseImpl(IRP, A) {} 2402 2403 /// See AbstractAttribute::initialize(...). 2404 void initialize(Attributor &A) override { 2405 AANoRecurseImpl::initialize(A); 2406 Function *F = getAssociatedFunction(); 2407 if (!F || F->isDeclaration()) 2408 indicatePessimisticFixpoint(); 2409 } 2410 2411 /// See AbstractAttribute::updateImpl(...). 2412 ChangeStatus updateImpl(Attributor &A) override { 2413 // TODO: Once we have call site specific value information we can provide 2414 // call site specific liveness information and then it makes 2415 // sense to specialize attributes for call sites arguments instead of 2416 // redirecting requests to the callee argument. 
/// NoRecurse attribute deduction for a call site.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// -------------------- Undefined-Behavior Attributes ------------------------

struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The language reference now states that volatile stores are not UB, so
      // skip them.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using constant null pointer is only considered UB
      // if null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the
        // nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the
        //       value with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to null pointer where known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
            IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
          return true;
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
          //       We also ensure the return position is not "assumed dead"
          //       because the returned value was then potentially simplified
          //       to `undef` in AAReturnedValues without removing the
          //       `noundef` attribute yet.

          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
          //   (1) Returned value is known to be undef.
          //   (2) The value is known to be a null pointer and the returned
          //       position has the nonnull attribute (because the returned
          //       value is poison).
          bool FoundUB = false;
          if (isa<UndefValue>(V)) {
            FoundUB = true;
          } else {
            if (isa<ConstantPointerNull>(V)) {
              auto &NonNullAA = A.getAAFor<AANonNull>(
                  *this, IRPosition::returned(*getAnchorScope()),
                  DepClassTy::NONE);
              if (NonNullAA.isKnownNonNull())
                FoundUB = true;
            }
          }

          if (FoundUB)
            for (ReturnInst *RI : RetInsts)
              KnownUBInsts.insert(RI);
          return true;
        };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
                                                    *this);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // ensures that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    } break;
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }
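  // Illustrative example (the IR below is not taken from this file): the
  // manifest step above rewrites instructions known to cause UB into
  // unreachable code. For instance, a store through a constant null pointer,
  //
  //   store i32 0, i32* null
  //
  // is replaced by an `unreachable` terminator when null is not a defined
  // address in that address space.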
  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction -
  ///    AAUndefinedBehavior could not find a reason to assume or prove that
  ///    it can cause UB, hence it assumes it doesn't. We have a set for these
  ///    instructions so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV = A.getAssumedSimplified(
        IRPosition::value(*V), *this, UsedAssumedInformation);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV.hasValue()) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!SimplifiedV.getValue())
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle which we don't
// know whether it is bounded or not. Loops with a known maximum trip count are
// considered bounded; any other cycle is not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // conservatively assume any cycle is unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function which
    // might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    const auto &MemAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (!MemAA.isAssumedReadOnly())
      return false;
    if (KnownOnly && !MemAA.isKnownReadOnly())
      return false;
    if (!MemAA.isKnownReadOnly())
      A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
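// Illustrative example (the IR below is not taken from this file):
// `mustprogress` combined with `readonly` implies `willreturn`, because a
// function that must make progress but cannot write memory has no way to
// loop forever, e.g.,
//
//   define i32 @f(i32 %x) mustprogress readonly {
//     %r = add i32 %x, 1
//     ret i32 %r
//   }
//
// can be deduced willreturn by the checks above.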
"willreturn" : "may-noreturn"; 2850 } 2851 }; 2852 2853 struct AAWillReturnFunction final : AAWillReturnImpl { 2854 AAWillReturnFunction(const IRPosition &IRP, Attributor &A) 2855 : AAWillReturnImpl(IRP, A) {} 2856 2857 /// See AbstractAttribute::initialize(...). 2858 void initialize(Attributor &A) override { 2859 AAWillReturnImpl::initialize(A); 2860 2861 Function *F = getAnchorScope(); 2862 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 2863 indicatePessimisticFixpoint(); 2864 } 2865 2866 /// See AbstractAttribute::trackStatistics() 2867 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2868 }; 2869 2870 /// WillReturn attribute deduction for a call sites. 2871 struct AAWillReturnCallSite final : AAWillReturnImpl { 2872 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2873 : AAWillReturnImpl(IRP, A) {} 2874 2875 /// See AbstractAttribute::initialize(...). 2876 void initialize(Attributor &A) override { 2877 AAWillReturnImpl::initialize(A); 2878 Function *F = getAssociatedFunction(); 2879 if (!F || !A.isFunctionIPOAmendable(*F)) 2880 indicatePessimisticFixpoint(); 2881 } 2882 2883 /// See AbstractAttribute::updateImpl(...). 2884 ChangeStatus updateImpl(Attributor &A) override { 2885 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2886 return ChangeStatus::UNCHANGED; 2887 2888 // TODO: Once we have call site specific value information we can provide 2889 // call site specific liveness information and then it makes 2890 // sense to specialize attributes for call sites arguments instead of 2891 // redirecting requests to the callee argument. 2892 Function *F = getAssociatedFunction(); 2893 const IRPosition &FnPos = IRPosition::function(*F); 2894 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 2895 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2896 } 2897 2898 /// See AbstractAttribute::trackStatistics() 2899 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2900 }; 2901 2902 /// -------------------AAReachability Attribute-------------------------- 2903 2904 struct AAReachabilityImpl : AAReachability { 2905 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2906 : AAReachability(IRP, A) {} 2907 2908 const std::string getAsStr() const override { 2909 // TODO: Return the number of reachable queries. 2910 return "reachable"; 2911 } 2912 2913 /// See AbstractAttribute::updateImpl(...). 2914 ChangeStatus updateImpl(Attributor &A) override { 2915 return ChangeStatus::UNCHANGED; 2916 } 2917 }; 2918 2919 struct AAReachabilityFunction final : public AAReachabilityImpl { 2920 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2921 : AAReachabilityImpl(IRP, A) {} 2922 2923 /// See AbstractAttribute::trackStatistics() 2924 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2925 }; 2926 2927 /// ------------------------ NoAlias Argument Attribute ------------------------ 2928 2929 struct AANoAliasImpl : AANoAlias { 2930 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2931 assert(getAssociatedType()->isPointerTy() && 2932 "Noalias is a pointer attribute"); 2933 } 2934 2935 const std::string getAsStr() const override { 2936 return getAssumed() ? "noalias" : "may-alias"; 2937 } 2938 }; 2939 2940 /// NoAlias attribute for a floating value. 
/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};
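// Illustrative example (the IR below is not taken from this file): an alloca
// is trivially noalias, and the cast-stripping loop above lets us see through
// single-use casts to reach it, e.g.,
//
//   %a = alloca i64
//   %p = bitcast i64* %a to i8*  ; %p inherits noalias from %a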
/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // See callsite argument attribute and callee argument attribute.
    if (hasAttr({Attribute::ByVal}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::update(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // We have to make sure no-alias on the argument does not break
    // synchronization when this is a callback argument, see also [1] below.
    // If synchronization cannot be affected, we delegate to the base
    // updateImpl function, otherwise we give up for now.

    // If the function is no-sync, no-alias cannot break synchronization.
    const auto &NoSyncAA =
        A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
                             DepClassTy::OPTIONAL);
    if (NoSyncAA.isAssumedNoSync())
      return Base::updateImpl(A);

    // If the argument is read-only, no-alias cannot break synchronization.
    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, getIRPosition(), DepClassTy::OPTIONAL);
    if (MemBehaviorAA.isAssumedReadOnly())
      return Base::updateImpl(A);

    // If the argument is never passed through callbacks, no-alias cannot
    // break synchronization.
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(
            [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
            true, AllCallSitesKnown))
      return Base::updateImpl(A);

    // TODO: add no-alias but make sure it doesn't break synchronization by
    //       introducing fake uses. See:
    // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
    //     International Workshop on OpenMP 2018,
    //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p ICS (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }

    bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
    if (!AssociatedValueIsNoAliasAtDef) {
      LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
                        << " is not no-alias at the definition\n");
      return false;
    }

    A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const Function *ScopeFn = VIRP.getAnchorScope();
    auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // callsite.
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());

      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI we allow the use.
      // TODO: We should inspect the operands and allow those that cannot
      //       alias with the value.
      if (UserI == getCtxI() && UserI->getNumOperands() == 1)
        return true;

      if (ScopeFn) {
        const auto &ReachabilityAA = A.getAAFor<AAReachability>(
            *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);

        if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
          return true;

        if (auto *CB = dyn_cast<CallBase>(UserI)) {
          if (CB->isArgOperand(&U)) {

            unsigned ArgNo = CB->getArgOperandNo(&U);

            const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
                *this, IRPosition::callsite_argument(*CB, ArgNo),
                DepClassTy::OPTIONAL);

            if (NoCaptureAA.isAssumedNoCapture())
              return true;
          }
        }
      }

      // For cases which can potentially have more users.
      if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
          isa<SelectInst>(U)) {
        Follow = true;
        return true;
      }

      LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
      return false;
    };

    if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
        LLVM_DEBUG(
            dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
                   << " cannot be noalias as it is potentially captured\n");
        return false;
      }
    }
    A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);

    // Check that there is no other pointer argument which could alias with
    // the value passed at this call site.
    // TODO: AbstractCallSite
    const auto &CB = cast<CallBase>(getAnchorValue());
    for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
      if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
        return false;

    return true;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // If the argument is readnone we are done as there are no accesses via
    // the argument.
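    // The query below uses DepClassTy::NONE; a dependence is recorded
    // manually only if the readnone assumption was actually used.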
    auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
    const auto &NoAliasAA =
        A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);

    AAResults *AAR = nullptr;
    if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
                                               NoAliasAA)) {
      LLVM_DEBUG(
          dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
      return ChangeStatus::UNCHANGED;
    }

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};

/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      /// For now, we can only deduce noalias if we have call sites.
      /// FIXME: add more support.
      if (!isa<CallBase>(&RV))
        return false;

      const IRPosition &RVPos = IRPosition::value(RV);
      const auto &NoAliasAA =
          A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      const auto &NoCaptureAA =
          A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
      return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};

/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
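    // Until then, simply clamp our state against the state of the callee's
    // return position.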
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};

/// -------------------AAIsDead Function Attribute-----------------------

struct AAIsDeadValueImpl : public AAIsDead {
  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override { return isAssumed(IS_DEAD); }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return isKnown(IS_DEAD); }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    return I == getCtxI() && isAssumedDead();
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return isAssumedDead(I) && isKnownDead();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead() ? "assumed-dead" : "assumed-live";
  }

  /// Check if all uses are assumed dead.
  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    // Callers might not check the type, void has no uses.
    if (V.getType()->isVoidTy())
      return true;

    // If we replace a value with a constant there are no uses left afterwards.
    if (!isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(V, *this, UsedAssumedInformation);
      if (!C.hasValue() || *C)
        return true;
    }

    auto UsePred = [&](const Use &U, bool &Follow) { return false; };
    // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one
    // is, without going through N update cycles. This is not required for
    // correctness.
    return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
                             DepClassTy::REQUIRED);
  }

  /// Determine if \p I is assumed to be side-effect free.
  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
    if (!I || wouldInstructionBeTriviallyDead(I))
      return true;

    auto *CB = dyn_cast<CallBase>(I);
    if (!CB || isa<IntrinsicInst>(CB))
      return false;

    const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
    const auto &NoUnwindAA =
        A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
    if (!NoUnwindAA.isAssumedNoUnwind())
      return false;
    if (!NoUnwindAA.isKnownNoUnwind())
      A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);

    const auto &MemBehaviorAA =
        A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadOnly()) {
      if (!MemBehaviorAA.isKnownReadOnly())
        A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }
};

struct AAIsDeadFloating : public AAIsDeadValueImpl {
  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (!isAssumedSideEffectFree(A, I)) {
      if (!isa_and_nonnull<StoreInst>(I))
        indicatePessimisticFixpoint();
      else
        removeAssumedBits(HAS_NO_EFFECT);
    }
  }

  bool isDeadStore(Attributor &A, StoreInst &SI) {
    // The language reference now states that volatile stores are neither UB
    // nor dead, so we skip them.
    if (SI.isVolatile())
      return false;

    bool UsedAssumedInformation = false;
    SmallSetVector<Value *, 4> PotentialCopies;
    if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
                                             UsedAssumedInformation))
      return false;
    return llvm::all_of(PotentialCopies, [&](Value *V) {
      return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
                             UsedAssumedInformation);
    });
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
      if (!isDeadStore(A, *SI))
        return indicatePessimisticFixpoint();
    } else {
      if (!isAssumedSideEffectFree(A, I))
        return indicatePessimisticFixpoint();
      if (!areAllUsesAssumedDead(A, getAssociatedValue()))
        return indicatePessimisticFixpoint();
    }
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // isAssumedSideEffectFree again here because it might be that only the
      // users are dead while the instruction (=call) is still needed.
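      // Invokes are terminators; deleting them here would break the CFG,
      // hence they are explicitly excluded below.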
      if (isa<StoreInst>(I) ||
          (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
        A.deleteAfterManifest(*I);
        return ChangeStatus::CHANGED;
      }
    }
    if (V.use_empty())
      return ChangeStatus::UNCHANGED;

    bool UsedAssumedInformation = false;
    Optional<Constant *> C =
        A.getAssumedConstant(V, *this, UsedAssumedInformation);
    if (C.hasValue() && C.getValue())
      return ChangeStatus::UNCHANGED;

    // Replace the value with undef as it is dead, but keep droppable uses
    // around as they provide information we don't want to give up on just
    // yet.
    UndefValue &UV = *UndefValue::get(V.getType());
    bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(IsDead)
  }
};

struct AAIsDeadArgument : public AAIsDeadFloating {
  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (!A.isFunctionIPOAmendable(*getAnchorScope()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = AAIsDeadFloating::manifest(A);
    Argument &Arg = *getAssociatedArgument();
    if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
      if (A.registerFunctionSignatureRewrite(
              Arg, /* ReplacementTypes */ {},
              Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
              Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
        Arg.dropDroppableUses();
        return ChangeStatus::CHANGED;
      }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    CallBase &CB = cast<CallBase>(getAnchorValue());
    Use &U = CB.getArgOperandUse(getCallSiteArgNo());
    assert(!isa<UndefValue>(U.get()) &&
           "Expected undef values to be filtered out!");
    UndefValue &UV = *UndefValue::get(U->getType());
    if (A.changeUseAfterManifest(U, UV))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override {
    return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    // We track this separately as a secondary state.
    IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
      IsAssumedSideEffectFree = false;
      Changed = ChangeStatus::CHANGED;
    }
    if (!areAllUsesAssumedDead(A, getAssociatedValue()))
      return indicatePessimisticFixpoint();
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (IsAssumedSideEffectFree)
      STATS_DECLTRACK_CSRET_ATTR(IsDead)
    else
      STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead()
               ? "assumed-dead"
               : (getAssumed() ? "assumed-dead-users" : "assumed-live");
  }

private:
  bool IsAssumedSideEffectFree;
};

struct AAIsDeadReturned : public AAIsDeadValueImpl {
  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions([](Instruction &) { return true; }, *this,
                              {Instruction::Ret}, UsedAssumedInformation);

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isCallbackCall() || !ACS.getInstruction())
        return false;
      return areAllUsesAssumedDead(A, *ACS.getInstruction());
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(PredForCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: Rewrite the signature to return void?
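    // For now, replace every returned value that is not already undef with
    // undef.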
    bool AnyChange = false;
    UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
    auto RetInstPred = [&](Instruction &I) {
      ReturnInst &RI = cast<ReturnInst>(I);
      if (!isa<UndefValue>(RI.getReturnValue()))
        AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
      return true;
    };
    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
                              UsedAssumedInformation);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
};

struct AAIsDeadFunction : public AAIsDead {
  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    if (F && !F->isDeclaration()) {
      // We only want to compute liveness once. If the function is not part of
      // the SCC, skip it.
      if (A.isRunOn(*const_cast<Function *>(F))) {
        ToBeExploredFrom.insert(&F->getEntryBlock().front());
        assumeLive(A, F->getEntryBlock());
      } else {
        indicatePessimisticFixpoint();
      }
    }
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAnchorScope()->size()) + "][#TBEP " +
           std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
           std::to_string(KnownDeadEnds.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAnchorScope();

    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
          *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB)) {
        A.deleteAfterManifest(BB);
        ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
      }

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
    return !AssumedLiveEdges.count(std::make_pair(From, To));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// Returns true if the function is assumed dead.
  bool isAssumedDead() const override { return false; }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return false; }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAnchorScope() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls
    // to internal functions we will assume that those are now live as well.
    // This is a performance optimization for blocks with calls to a lot of
    // internal functions. It can however cause dead functions to be treated
    // as live.
    for (const Instruction &I : BB)
      if (const auto *CB = dyn_cast<CallBase>(&I))
        if (const Function *F = CB->getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live edges.
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};

static bool
identifyAliveSuccessors(Attributor &A, const CallBase &CB,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  const IRPosition &IPos = IRPosition::callsite_function(CB);

  const auto &NoReturnAA =
      A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
  if (NoReturnAA.isAssumedNoReturn())
    return !NoReturnAA.isKnownNoReturn();
  if (CB.isTerminator())
    AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
  else
    AliveSuccessors.push_back(CB.getNextNode());
  return false;
}

static bool
identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation =
      identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);

  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows to catch asynchronous exceptions.
  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
    AliveSuccessors.push_back(&II.getUnwindDest()->front());
  } else {
    const IRPosition &IPos = IRPosition::callsite_function(II);
    const auto &AANoUnw =
        A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
    if (AANoUnw.isAssumedNoUnwind()) {
      UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
    } else {
      AliveSuccessors.push_back(&II.getUnwindDest()->front());
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  if (BI.getNumSuccessors() == 1) {
    AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
  } else {
    Optional<Constant *> C =
        A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
    if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
      // No value yet, assume both edges are dead.
    } else if (isa_and_nonnull<ConstantInt>(*C)) {
      const BasicBlock *SuccBB =
          BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
      AliveSuccessors.push_back(&SuccBB->front());
    } else {
      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
      AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
      UsedAssumedInformation = false;
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  Optional<Constant *> C =
      A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
  if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
    // No value yet, assume all edges are dead.
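    // If assumed information was used here, the caller keeps this terminator
    // in the exploration set and revisits it once the condition is known.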
  } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
    for (auto &CaseIt : SI.cases()) {
      if (CaseIt.getCaseValue() == C.getValue()) {
        AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
        return UsedAssumedInformation;
      }
    }
    AliveSuccessors.push_back(&SI.getDefaultDest()->front());
    return UsedAssumedInformation;
  } else {
    for (const BasicBlock *SuccBB : successors(SI.getParent()))
      AliveSuccessors.push_back(&SuccBB->front());
  }
  return UsedAssumedInformation;
}

ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAnchorScope()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Fast forward for uninteresting instructions. We could look for UB here
    // though.
    while (!I->isTerminator() && !isa<CallBase>(I))
      I = I->getNextNode();

    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
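    // Terminators without dedicated handling below conservatively keep all
    // their successors alive.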
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else if (AliveSuccessors.empty() ||
               (I->isTerminator() &&
                AliveSuccessors.size() < I->getNumSuccessors())) {
      if (KnownDeadEnds.insert(I))
        Change = ChangeStatus::CHANGED;
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Record the assumed live edge.
        auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
        if (AssumedLiveEdges.insert(Edge).second)
          Change = ChangeStatus::CHANGED;
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  // Check if the content of ToBeExploredFrom changed, ignore the order.
  if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
      llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
        return !ToBeExploredFrom.count(I);
      })) {
    Change = ChangeStatus::CHANGED;
    ToBeExploredFrom = std::move(NewToBeExploredFrom);
  }

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call sites instead of redirecting
    //       requests to the callee.
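    // Until then, this position is never created and reaching this point is
    // an error.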
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// -------------------- Dereferenceable Argument Attribute --------------------

struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
      : AADereferenceable(IRP, A) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs, /* IgnoreSubsumingPositions */ false, &A);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    const IRPosition &IRP = this->getIRPosition();
    NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);

    bool CanBeNull, CanBeFreed;
    takeKnownDerefBytesMaximum(
        IRP.getAssociatedValue().getPointerDereferenceableBytes(
            A.getDataLayout(), CanBeNull, CanBeFreed));

    bool IsFnInterface = IRP.isFnInterfaceKind();
    Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
                              DerefState &State) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Type *PtrTy = UseV->getType();
    const DataLayout &DL = A.getDataLayout();
    int64_t Offset;
    if (const Value *Base = getBasePointerOfAccessPointerOperand(
            I, Offset, DL, /*AllowNonInbounds*/ true)) {
      if (Base == &getAssociatedValue() &&
          getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
        uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
        State.addAccessedBytes(Offset, Size);
      }
    }
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AADereferenceable::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
                      << " for instruction " << *I << "\n");

    addAccessedBytesForUse(A, U, I, State);
    State.takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
                            bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);

      const auto &AA = A.getAAFor<AADereferenceable>(
          *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull, CanBeFreed;
        DerefBytes =
            Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = AA.getState();
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to
      // negative indices as we first have to come up with code to deal with
      // loops and with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
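          // Hence, give up on the assumed value right away instead of slowly
          // iterating down to the known value.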
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
                                           VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
            IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable,
                                      AADereferenceableImpl> {
  using Base =
      AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AADereferenceableFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final
    : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};

// ------------------------ Align Argument Attribute ------------------------

static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
                                    Value &AssociatedValue, const Use *U,
                                    const Instruction *I, bool &TrackUse) {
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into.
  if (isa<CastInst>(I)) {
    // Follow all but ptr2int casts.
    TrackUse = !isa<PtrToIntInst>(I);
    return 0;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (GEP->hasAllConstantIndices())
      TrackUse = true;
    return 0;
  }

  MaybeAlign MA;
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U) || CB->isCallee(U))
      return 0;

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
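    // Hence, the query below uses DepClassTy::NONE and only the known
    // alignment is taken.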
    auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
    MA = MaybeAlign(AlignAA.getKnownAlign());
  }

  const DataLayout &DL = A.getDataLayout();
  const Value *UseV = U->get();
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (SI->getPointerOperand() == UseV)
      MA = SI->getAlign();
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (LI->getPointerOperand() == UseV)
      MA = LI->getAlign();
  }

  if (!MA || *MA <= QueryingAA.getKnownAlign())
    return 0;

  unsigned Alignment = MA->value();
  int64_t Offset;

  if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
    if (Base == &AssociatedValue) {
      // BasePointerAddr + Offset = Alignment * Q for some integer Q.
      // So we can say that the maximum power of two which is a divisor of
      // gcd(Offset, Alignment) is an alignment. E.g., Offset = 20 and
      // Alignment = 16 give gcd(20, 16) = 4, so align(4) can be deduced.

      uint32_t gcd =
          greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
      Alignment = llvm::PowerOf2Floor(gcd);
    }
  }

  return Alignment;
}

struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    Value &V = getAssociatedValue();
    // TODO: This is a HACK to avoid getPointerAlignment to introduce a
    //       ptr2int use of the function pointer. This was caused by D73131.
    //       We want to avoid this for function pointers especially because
    //       we iterate their uses and int2ptr is not handled. It is not a
    //       correctness problem though!
    if (!V.getType()->getPointerElementType()->isFunctionTy())
      takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());

    if (getIRPosition().isFnInterfaceKind() &&
        (!getAnchorScope() ||
         !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
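    // Currently these are loads and stores whose pointer operand is the
    // associated value; see the loop below.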
    Value &AssociatedValue = getAssociatedValue();
    for (const Use &U : AssociatedValue.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AssociatedValue)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(Align(getAssumedAlign()));
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AssociatedValue)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(Align(getAssumedAlign()));
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            LoadStoreChanged = ChangeStatus::CHANGED;
          }
      }
    }

    ChangeStatus Changed = AAAlign::manifest(A);

    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      return LoadStoreChanged;
    return Changed | LoadStoreChanged;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       in the existing manifest method and a new one for AAAlignImpl that
  //       value to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(
          Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AAAlign::StateType &State) {
    bool TrackUse = false;

    unsigned int KnownAlign =
        getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
    State.takeKnownMaximum(KnownAlign);

    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};

/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, const Instruction *,
                            AAAlign::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
                                           DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        int64_t Offset;
        unsigned Alignment = 1;
        if (const Value *Base =
                GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
          Align PA = Base->getPointerAlignment(DL);
          // BasePointerAddr + Offset = Alignment * Q for some integer Q.
          // So we can say that the maximum power of two which is a divisor of
          // gcd(Offset, Alignment) is an alignment.

          uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
                                               uint32_t(PA.value()));
          Alignment = llvm::PowerOf2Floor(gcd);
        } else {
          Alignment = V.getPointerAlignment(DL).value();
        }
        // Use only IR information if we did not strip anything.
        T.takeKnownMaximum(Alignment);
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS = AA.getState();
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};

/// Align attribute for function return value.
struct AAAlignReturned final
    : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
  using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
  AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};

/// Align attribute for function argument.
struct AAAlignArgument final
    : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
  using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
  AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
    if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
      return ChangeStatus::UNCHANGED;
    return Base::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
};

struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAAlignFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in-sync. It just does not seem worth the trouble right now.
    if (Argument *Arg = getAssociatedArgument())
      if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
        return ChangeStatus::UNCHANGED;
    ChangeStatus Changed = AAAlignImpl::manifest(A);
    Align InheritAlign =
        getAssociatedValue().getPointerAlignment(A.getDataLayout());
    if (InheritAlign >= getAssumedAlign())
      Changed = ChangeStatus::UNCHANGED;
    return Changed;
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = AAAlignFloating::updateImpl(A);
    if (Argument *Arg = getAssociatedArgument()) {
      // We only take known information from the argument
      // so we do not need to track a dependence.
      const auto &ArgAlignAA = A.getAAFor<AAAlign>(
          *this, IRPosition::argument(*Arg), DepClassTy::NONE);
      takeKnownMaximum(ArgAlignAA.getKnownAlign());
    }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};

/// Align attribute deduction for a call site return value.
struct AAAlignCallSiteReturned final
    : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
  using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
  AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};

/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturn::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret},
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AANoReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturnImpl::initialize(A);
    if (Function *F = getAssociatedFunction()) {
      const IRPosition &FnPos = IRPosition::function(*F);
      auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
      if (!FnAA.isAssumedNoReturn())
        indicatePessimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
4626 ChangeStatus updateImpl(Attributor &A) override { 4627 // TODO: Once we have call site specific value information we can provide 4628 // call site specific liveness information and then it makes 4629 // sense to specialize attributes for call site arguments instead of 4630 // redirecting requests to the callee argument. 4631 Function *F = getAssociatedFunction(); 4632 const IRPosition &FnPos = IRPosition::function(*F); 4633 auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED); 4634 return clampStateAndIndicateChange(getState(), FnAA.getState()); 4635 } 4636 4637 /// See AbstractAttribute::trackStatistics() 4638 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); } 4639 }; 4640 4641 /// ----------------------- Variable Capturing --------------------------------- 4642 4643 /// A class to hold the state for no-capture attributes. 4644 struct AANoCaptureImpl : public AANoCapture { 4645 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {} 4646 4647 /// See AbstractAttribute::initialize(...). 4648 void initialize(Attributor &A) override { 4649 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) { 4650 indicateOptimisticFixpoint(); 4651 return; 4652 } 4653 Function *AnchorScope = getAnchorScope(); 4654 if (isFnInterfaceKind() && 4655 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) { 4656 indicatePessimisticFixpoint(); 4657 return; 4658 } 4659 4660 // You cannot "capture" null in the default address space. 4661 if (isa<ConstantPointerNull>(getAssociatedValue()) && 4662 getAssociatedValue().getType()->getPointerAddressSpace() == 0) { 4663 indicateOptimisticFixpoint(); 4664 return; 4665 } 4666 4667 const Function *F = 4668 isArgumentPosition() ? getAssociatedFunction() : AnchorScope; 4669 4670 // Check what state the associated function can actually capture. 4671 if (F) 4672 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 4673 else 4674 indicatePessimisticFixpoint(); 4675 } 4676 4677 /// See AbstractAttribute::updateImpl(...). 4678 ChangeStatus updateImpl(Attributor &A) override; 4679 4680 /// See AbstractAttribute::getDeducedAttributes(...). 4681 virtual void 4682 getDeducedAttributes(LLVMContext &Ctx, 4683 SmallVectorImpl<Attribute> &Attrs) const override { 4684 if (!isAssumedNoCaptureMaybeReturned()) 4685 return; 4686 4687 if (isArgumentPosition()) { 4688 if (isAssumedNoCapture()) 4689 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture)); 4690 else if (ManifestInternal) 4691 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned")); 4692 } 4693 } 4694 4695 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State 4696 /// depending on the ability of the function associated with \p IRP to capture 4697 /// state in memory and through "returning/throwing", respectively. 4698 static void determineFunctionCaptureCapabilities(const IRPosition &IRP, 4699 const Function &F, 4700 BitIntegerState &State) { 4701 // TODO: Once we have memory behavior attributes we should use them here. 4702 4703 // If we know we cannot communicate or write to memory, we do not care about 4704 // ptr2int anymore.
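// For example (a sketch): a `readonly nounwind` callee with a `void` return
// type has no way to make the pointer observable to its caller, so the check
// below can add NO_CAPTURE right away.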
4705 if (F.onlyReadsMemory() && F.doesNotThrow() && 4706 F.getReturnType()->isVoidTy()) { 4707 State.addKnownBits(NO_CAPTURE); 4708 return; 4709 } 4710 4711 // A function cannot capture state in memory if it only reads memory; it can, 4712 // however, return/throw state and the state might be influenced by the 4713 // pointer value, e.g., loading from a returned pointer might reveal a bit. 4714 if (F.onlyReadsMemory()) 4715 State.addKnownBits(NOT_CAPTURED_IN_MEM); 4716 4717 // A function cannot communicate state back if it does not throw 4718 // exceptions and does not return values. 4719 if (F.doesNotThrow() && F.getReturnType()->isVoidTy()) 4720 State.addKnownBits(NOT_CAPTURED_IN_RET); 4721 4722 // Check existing "returned" attributes. 4723 int ArgNo = IRP.getCalleeArgNo(); 4724 if (F.doesNotThrow() && ArgNo >= 0) { 4725 for (unsigned u = 0, e = F.arg_size(); u < e; ++u) 4726 if (F.hasParamAttribute(u, Attribute::Returned)) { 4727 if (u == unsigned(ArgNo)) 4728 State.removeAssumedBits(NOT_CAPTURED_IN_RET); 4729 else if (F.onlyReadsMemory()) 4730 State.addKnownBits(NO_CAPTURE); 4731 else 4732 State.addKnownBits(NOT_CAPTURED_IN_RET); 4733 break; 4734 } 4735 } 4736 } 4737 4738 /// See AbstractState::getAsStr(). 4739 const std::string getAsStr() const override { 4740 if (isKnownNoCapture()) 4741 return "known not-captured"; 4742 if (isAssumedNoCapture()) 4743 return "assumed not-captured"; 4744 if (isKnownNoCaptureMaybeReturned()) 4745 return "known not-captured-maybe-returned"; 4746 if (isAssumedNoCaptureMaybeReturned()) 4747 return "assumed not-captured-maybe-returned"; 4748 return "assumed-captured"; 4749 } 4750 }; 4751 4752 /// Attributor-aware capture tracker. 4753 struct AACaptureUseTracker final : public CaptureTracker { 4754 4755 /// Create a capture tracker that can lookup in-flight abstract attributes 4756 /// through the Attributor \p A. 4757 /// 4758 /// If a use leads to a potential capture, \p CapturedInMemory is set and the 4759 /// search is stopped. If a use leads to a return instruction, 4760 /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed. 4761 /// If a use leads to a ptr2int which may capture the value, 4762 /// \p CapturedInInteger is set. If a use is found that is currently assumed 4763 /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies 4764 /// set. All values in \p PotentialCopies are later tracked as well. For every 4765 /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0, 4766 /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger 4767 /// conservatively set to true. 4768 AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA, 4769 const AAIsDead &IsDeadAA, AANoCapture::StateType &State, 4770 SmallSetVector<Value *, 4> &PotentialCopies, 4771 unsigned &RemainingUsesToExplore) 4772 : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State), 4773 PotentialCopies(PotentialCopies), 4774 RemainingUsesToExplore(RemainingUsesToExplore) {} 4775 4776 /// Determine if \p V may be captured. *Also updates the state!* 4777 bool valueMayBeCaptured(const Value *V) { 4778 if (V->getType()->isPointerTy()) { 4779 PointerMayBeCaptured(V, this); 4780 } else { 4781 State.indicatePessimisticFixpoint(); 4782 } 4783 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4784 } 4785 4786 /// See CaptureTracker::tooManyUses().
4787 void tooManyUses() override { 4788 State.removeAssumedBits(AANoCapture::NO_CAPTURE); 4789 } 4790 4791 bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override { 4792 if (CaptureTracker::isDereferenceableOrNull(O, DL)) 4793 return true; 4794 const auto &DerefAA = A.getAAFor<AADereferenceable>( 4795 NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL); 4796 return DerefAA.getAssumedDereferenceableBytes(); 4797 } 4798 4799 /// See CaptureTracker::captured(...). 4800 bool captured(const Use *U) override { 4801 Instruction *UInst = cast<Instruction>(U->getUser()); 4802 LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst 4803 << "\n"); 4804 4805 // Because we may reuse the tracker multiple times we keep track of the 4806 // number of explored uses ourselves as well. 4807 if (RemainingUsesToExplore-- == 0) { 4808 LLVM_DEBUG(dbgs() << " - too many uses to explore!\n"); 4809 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4810 /* Return */ true); 4811 } 4812 4813 // Deal with ptr2int by following uses. 4814 if (isa<PtrToIntInst>(UInst)) { 4815 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n"); 4816 return valueMayBeCaptured(UInst); 4817 } 4818 4819 // For stores we check if we can follow the value through memory or not. 4820 if (auto *SI = dyn_cast<StoreInst>(UInst)) { 4821 if (SI->isVolatile()) 4822 return isCapturedIn(/* Memory */ true, /* Integer */ false, 4823 /* Return */ false); 4824 bool UsedAssumedInformation = false; 4825 if (!AA::getPotentialCopiesOfStoredValue( 4826 A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation)) 4827 return isCapturedIn(/* Memory */ true, /* Integer */ false, 4828 /* Return */ false); 4829 // Not captured directly, potential copies will be checked. 4830 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4831 /* Return */ false); 4832 } 4833 4834 // Explicitly catch return instructions. 4835 if (isa<ReturnInst>(UInst)) { 4836 if (UInst->getFunction() == NoCaptureAA.getAnchorScope()) 4837 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4838 /* Return */ true); 4839 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4840 /* Return */ true); 4841 } 4842 4843 // For now we only use special logic for call sites. However, the tracker 4844 // itself knows about a lot of other non-capturing cases already. 4845 auto *CB = dyn_cast<CallBase>(UInst); 4846 if (!CB || !CB->isArgOperand(U)) 4847 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4848 /* Return */ true); 4849 4850 unsigned ArgNo = CB->getArgOperandNo(U); 4851 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo); 4852 // If we have an abstract no-capture attribute for the argument we can use 4853 // it to justify a non-capture attribute here. This allows recursion! 4854 auto &ArgNoCaptureAA = 4855 A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED); 4856 if (ArgNoCaptureAA.isAssumedNoCapture()) 4857 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4858 /* Return */ false); 4859 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 4860 addPotentialCopy(*CB); 4861 return isCapturedIn(/* Memory */ false, /* Integer */ false, 4862 /* Return */ false); 4863 } 4864 4865 // Lastly, we could not find a reason to assume no-capture, so we don't. 4866 return isCapturedIn(/* Memory */ true, /* Integer */ true, 4867 /* Return */ true); 4868 } 4869 4870 /// Register \p CB as potential copy of the value we are checking.
4871 void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); } 4872 4873 /// See CaptureTracker::shouldExplore(...). 4874 bool shouldExplore(const Use *U) override { 4875 // Check liveness and ignore droppable users. 4876 bool UsedAssumedInformation = false; 4877 return !U->getUser()->isDroppable() && 4878 !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA, 4879 UsedAssumedInformation); 4880 } 4881 4882 /// Update the state according to \p CapturedInMem, \p CapturedInInt, and 4883 /// \p CapturedInRet, then return the appropriate value for use in the 4884 /// CaptureTracker::captured() interface. 4885 bool isCapturedIn(bool CapturedInMem, bool CapturedInInt, 4886 bool CapturedInRet) { 4887 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int " 4888 << CapturedInInt << "|Ret " << CapturedInRet << "]\n"); 4889 if (CapturedInMem) 4890 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM); 4891 if (CapturedInInt) 4892 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT); 4893 if (CapturedInRet) 4894 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET); 4895 return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED); 4896 } 4897 4898 private: 4899 /// The attributor providing in-flight abstract attributes. 4900 Attributor &A; 4901 4902 /// The abstract attribute currently updated. 4903 AANoCapture &NoCaptureAA; 4904 4905 /// The abstract liveness state. 4906 const AAIsDead &IsDeadAA; 4907 4908 /// The state currently updated. 4909 AANoCapture::StateType &State; 4910 4911 /// Set of potential copies of the tracked value. 4912 SmallSetVector<Value *, 4> &PotentialCopies; 4913 4914 /// Global counter to limit the number of explored uses. 4915 unsigned &RemainingUsesToExplore; 4916 }; 4917 4918 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) { 4919 const IRPosition &IRP = getIRPosition(); 4920 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument() 4921 : &IRP.getAssociatedValue(); 4922 if (!V) 4923 return indicatePessimisticFixpoint(); 4924 4925 const Function *F = 4926 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope(); 4927 assert(F && "Expected a function!"); 4928 const IRPosition &FnPos = IRPosition::function(*F); 4929 const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE); 4930 4931 AANoCapture::StateType T; 4932 4933 // Readonly means we cannot capture through memory. 4934 const auto &FnMemAA = 4935 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE); 4936 if (FnMemAA.isAssumedReadOnly()) { 4937 T.addKnownBits(NOT_CAPTURED_IN_MEM); 4938 if (FnMemAA.isKnownReadOnly()) 4939 addKnownBits(NOT_CAPTURED_IN_MEM); 4940 else 4941 A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL); 4942 } 4943 4944 // Make sure all returned values are different than the underlying value. 4945 // TODO: we could do this in a more sophisticated way inside 4946 // AAReturnedValues, e.g., track all values that escape through returns 4947 // directly somehow. 
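// Illustrative sketch (hypothetical IR): in
//   define i8* @id(i8* %p) { ret i8* %p }
// the argument escapes through the return, so NOT_CAPTURED_IN_RET must not
// be assumed for %p even in a nounwind function; the CheckReturnedArgs
// predicate below rejects exactly this case.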
4948 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) { 4949 bool SeenConstant = false; 4950 for (auto &It : RVAA.returned_values()) { 4951 if (isa<Constant>(It.first)) { 4952 if (SeenConstant) 4953 return false; 4954 SeenConstant = true; 4955 } else if (!isa<Argument>(It.first) || 4956 It.first == getAssociatedArgument()) 4957 return false; 4958 } 4959 return true; 4960 }; 4961 4962 const auto &NoUnwindAA = 4963 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL); 4964 if (NoUnwindAA.isAssumedNoUnwind()) { 4965 bool IsVoidTy = F->getReturnType()->isVoidTy(); 4966 const AAReturnedValues *RVAA = 4967 IsVoidTy ? nullptr 4968 : &A.getAAFor<AAReturnedValues>(*this, FnPos, 4969 4970 DepClassTy::OPTIONAL); 4971 if (IsVoidTy || CheckReturnedArgs(*RVAA)) { 4972 T.addKnownBits(NOT_CAPTURED_IN_RET); 4973 if (T.isKnown(NOT_CAPTURED_IN_MEM)) 4974 return ChangeStatus::UNCHANGED; 4975 if (NoUnwindAA.isKnownNoUnwind() && 4976 (IsVoidTy || RVAA->getState().isAtFixpoint())) { 4977 addKnownBits(NOT_CAPTURED_IN_RET); 4978 if (isKnown(NOT_CAPTURED_IN_MEM)) 4979 return indicateOptimisticFixpoint(); 4980 } 4981 } 4982 } 4983 4984 // Use the CaptureTracker interface and logic with the specialized tracker, 4985 // defined in AACaptureUseTracker, that can look at in-flight abstract 4986 // attributes and directly updates the assumed state. 4987 SmallSetVector<Value *, 4> PotentialCopies; 4988 unsigned RemainingUsesToExplore = 4989 getDefaultMaxUsesToExploreForCaptureTracking(); 4990 AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies, 4991 RemainingUsesToExplore); 4992 4993 // Check all potential copies of the associated value until we can assume 4994 // none will be captured or we have to assume at least one might be. 4995 unsigned Idx = 0; 4996 PotentialCopies.insert(V); 4997 while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size()) 4998 Tracker.valueMayBeCaptured(PotentialCopies[Idx++]); 4999 5000 AANoCapture::StateType &S = getState(); 5001 auto Assumed = S.getAssumed(); 5002 S.intersectAssumedBits(T.getAssumed()); 5003 if (!isAssumedNoCaptureMaybeReturned()) 5004 return indicatePessimisticFixpoint(); 5005 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED 5006 : ChangeStatus::CHANGED; 5007 } 5008 5009 /// NoCapture attribute for function arguments. 5010 struct AANoCaptureArgument final : AANoCaptureImpl { 5011 AANoCaptureArgument(const IRPosition &IRP, Attributor &A) 5012 : AANoCaptureImpl(IRP, A) {} 5013 5014 /// See AbstractAttribute::trackStatistics() 5015 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) } 5016 }; 5017 5018 /// NoCapture attribute for call site arguments. 5019 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl { 5020 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A) 5021 : AANoCaptureImpl(IRP, A) {} 5022 5023 /// See AbstractAttribute::initialize(...). 5024 void initialize(Attributor &A) override { 5025 if (Argument *Arg = getAssociatedArgument()) 5026 if (Arg->hasByValAttr()) 5027 indicateOptimisticFixpoint(); 5028 AANoCaptureImpl::initialize(A); 5029 } 5030 5031 /// See AbstractAttribute::updateImpl(...). 5032 ChangeStatus updateImpl(Attributor &A) override { 5033 // TODO: Once we have call site specific value information we can provide 5034 // call site specific liveness information and then it makes 5035 // sense to specialize attributes for call sites arguments instead of 5036 // redirecting requests to the callee argument. 
5037 Argument *Arg = getAssociatedArgument(); 5038 if (!Arg) 5039 return indicatePessimisticFixpoint(); 5040 const IRPosition &ArgPos = IRPosition::argument(*Arg); 5041 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED); 5042 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 5043 } 5044 5045 /// See AbstractAttribute::trackStatistics() 5046 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)}; 5047 }; 5048 5049 /// NoCapture attribute for floating values. 5050 struct AANoCaptureFloating final : AANoCaptureImpl { 5051 AANoCaptureFloating(const IRPosition &IRP, Attributor &A) 5052 : AANoCaptureImpl(IRP, A) {} 5053 5054 /// See AbstractAttribute::trackStatistics() 5055 void trackStatistics() const override { 5056 STATS_DECLTRACK_FLOATING_ATTR(nocapture) 5057 } 5058 }; 5059 5060 /// NoCapture attribute for function return value. 5061 struct AANoCaptureReturned final : AANoCaptureImpl { 5062 AANoCaptureReturned(const IRPosition &IRP, Attributor &A) 5063 : AANoCaptureImpl(IRP, A) { 5064 llvm_unreachable("NoCapture is not applicable to function returns!"); 5065 } 5066 5067 /// See AbstractAttribute::initialize(...). 5068 void initialize(Attributor &A) override { 5069 llvm_unreachable("NoCapture is not applicable to function returns!"); 5070 } 5071 5072 /// See AbstractAttribute::updateImpl(...). 5073 ChangeStatus updateImpl(Attributor &A) override { 5074 llvm_unreachable("NoCapture is not applicable to function returns!"); 5075 } 5076 5077 /// See AbstractAttribute::trackStatistics() 5078 void trackStatistics() const override {} 5079 }; 5080 5081 /// NoCapture attribute deduction for a call site return value. 5082 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl { 5083 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A) 5084 : AANoCaptureImpl(IRP, A) {} 5085 5086 /// See AbstractAttribute::initialize(...). 5087 void initialize(Attributor &A) override { 5088 const Function *F = getAnchorScope(); 5089 // Check what state the associated function can actually capture. 5090 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this); 5091 } 5092 5093 /// See AbstractAttribute::trackStatistics() 5094 void trackStatistics() const override { 5095 STATS_DECLTRACK_CSRET_ATTR(nocapture) 5096 } 5097 }; 5098 } // namespace 5099 5100 /// ------------------ Value Simplify Attribute ---------------------------- 5101 5102 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) { 5103 // FIXME: Add a typecast support. 5104 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5105 SimplifiedAssociatedValue, Other, Ty); 5106 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr)) 5107 return false; 5108 5109 LLVM_DEBUG({ 5110 if (SimplifiedAssociatedValue.hasValue()) 5111 dbgs() << "[ValueSimplify] is assumed to be " 5112 << **SimplifiedAssociatedValue << "\n"; 5113 else 5114 dbgs() << "[ValueSimplify] is assumed to be <none>\n"; 5115 }); 5116 return true; 5117 } 5118 5119 namespace { 5120 struct AAValueSimplifyImpl : AAValueSimplify { 5121 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A) 5122 : AAValueSimplify(IRP, A) {} 5123 5124 /// See AbstractAttribute::initialize(...). 5125 void initialize(Attributor &A) override { 5126 if (getAssociatedValue().getType()->isVoidTy()) 5127 indicatePessimisticFixpoint(); 5128 if (A.hasSimplificationCallback(getIRPosition())) 5129 indicatePessimisticFixpoint(); 5130 } 5131 5132 /// See AbstractAttribute::getAsStr(). 
5133 const std::string getAsStr() const override { 5134 LLVM_DEBUG({ 5135 errs() << "SAV: " << SimplifiedAssociatedValue << " "; 5136 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue) 5137 errs() << "SAV: " << **SimplifiedAssociatedValue << " "; 5138 }); 5139 return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple") 5140 : "not-simple"; 5141 } 5142 5143 /// See AbstractAttribute::trackStatistics() 5144 void trackStatistics() const override {} 5145 5146 /// See AAValueSimplify::getAssumedSimplifiedValue() 5147 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 5148 return SimplifiedAssociatedValue; 5149 } 5150 5151 /// Return a value we can use as replacement for the associated one, or 5152 /// nullptr if we don't have one that makes sense. 5153 Value *getReplacementValue(Attributor &A) const { 5154 Value *NewV; 5155 NewV = SimplifiedAssociatedValue.hasValue() 5156 ? SimplifiedAssociatedValue.getValue() 5157 : UndefValue::get(getAssociatedType()); 5158 if (!NewV) 5159 return nullptr; 5160 NewV = AA::getWithType(*NewV, *getAssociatedType()); 5161 if (!NewV || NewV == &getAssociatedValue()) 5162 return nullptr; 5163 const Instruction *CtxI = getCtxI(); 5164 if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache())) 5165 return nullptr; 5166 if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope())) 5167 return nullptr; 5168 return NewV; 5169 } 5170 5171 /// Helper function for querying AAValueSimplify and updating the candidate. 5172 /// \param IRP The value position we are trying to unify with SimplifiedValue 5173 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA, 5174 const IRPosition &IRP, bool Simplify = true) { 5175 bool UsedAssumedInformation = false; 5176 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue(); 5177 if (Simplify) 5178 QueryingValueSimplified = 5179 A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation); 5180 return unionAssumed(QueryingValueSimplified); 5181 } 5182 5183 /// Return true if a candidate was found, false otherwise. 5184 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) { 5185 if (!getAssociatedValue().getType()->isIntegerTy()) 5186 return false; 5187 5188 // This will also pass the call base context. 5189 const auto &AA = 5190 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE); 5191 5192 Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A); 5193 5194 if (!COpt.hasValue()) { 5195 SimplifiedAssociatedValue = llvm::None; 5196 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5197 return true; 5198 } 5199 if (auto *C = COpt.getValue()) { 5200 SimplifiedAssociatedValue = C; 5201 A.recordDependence(AA, *this, DepClassTy::OPTIONAL); 5202 return true; 5203 } 5204 return false; 5205 } 5206 5207 bool askSimplifiedValueForOtherAAs(Attributor &A) { 5208 if (askSimplifiedValueFor<AAValueConstantRange>(A)) 5209 return true; 5210 if (askSimplifiedValueFor<AAPotentialValues>(A)) 5211 return true; 5212 return false; 5213 } 5214 5215 /// See AbstractAttribute::manifest(...).
5216 ChangeStatus manifest(Attributor &A) override { 5217 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5218 if (getAssociatedValue().user_empty()) 5219 return Changed; 5220 5221 if (auto *NewV = getReplacementValue(A)) { 5222 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> " 5223 << *NewV << " :: " << *this << "\n"); 5224 if (A.changeValueAfterManifest(getAssociatedValue(), *NewV)) 5225 Changed = ChangeStatus::CHANGED; 5226 } 5227 5228 return Changed | AAValueSimplify::manifest(A); 5229 } 5230 5231 /// See AbstractState::indicatePessimisticFixpoint(...). 5232 ChangeStatus indicatePessimisticFixpoint() override { 5233 SimplifiedAssociatedValue = &getAssociatedValue(); 5234 return AAValueSimplify::indicatePessimisticFixpoint(); 5235 } 5236 5237 static bool handleLoad(Attributor &A, const AbstractAttribute &AA, 5238 LoadInst &L, function_ref<bool(Value &)> Union) { 5239 auto UnionWrapper = [&](Value &V, Value &Obj) { 5240 if (isa<AllocaInst>(Obj)) 5241 return Union(V); 5242 if (!AA::isDynamicallyUnique(A, AA, V)) 5243 return false; 5244 if (!AA::isValidAtPosition(V, L, A.getInfoCache())) 5245 return false; 5246 return Union(V); 5247 }; 5248 5249 Value &Ptr = *L.getPointerOperand(); 5250 SmallVector<Value *, 8> Objects; 5251 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L)) 5252 return false; 5253 5254 const auto *TLI = 5255 A.getInfoCache().getTargetLibraryInfoForFunction(*L.getFunction()); 5256 for (Value *Obj : Objects) { 5257 LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n"); 5258 if (isa<UndefValue>(Obj)) 5259 continue; 5260 if (isa<ConstantPointerNull>(Obj)) { 5261 // A null pointer access can be undefined but any offset from null may 5262 // be OK. We do not try to optimize the latter. 5263 bool UsedAssumedInformation = false; 5264 if (!NullPointerIsDefined(L.getFunction(), 5265 Ptr.getType()->getPointerAddressSpace()) && 5266 A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj) 5267 continue; 5268 return false; 5269 } 5270 if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj) && 5271 !isNoAliasFn(Obj, TLI)) 5272 return false; 5273 Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType(), TLI); 5274 if (!InitialVal || !Union(*InitialVal)) 5275 return false; 5276 5277 LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store " 5278 "propagation, checking accesses next.\n"); 5279 5280 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) { 5281 LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n"); 5282 if (!Acc.isWrite()) 5283 return true; 5284 if (Acc.isWrittenValueYetUndetermined()) 5285 return true; 5286 Value *Content = Acc.getWrittenValue(); 5287 if (!Content) 5288 return false; 5289 Value *CastedContent = 5290 AA::getWithType(*Content, *AA.getAssociatedType()); 5291 if (!CastedContent) 5292 return false; 5293 if (IsExact) 5294 return UnionWrapper(*CastedContent, *Obj); 5295 if (auto *C = dyn_cast<Constant>(CastedContent)) 5296 if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C)) 5297 return UnionWrapper(*CastedContent, *Obj); 5298 return false; 5299 }; 5300 5301 auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj), 5302 DepClassTy::REQUIRED); 5303 if (!PI.forallInterferingAccesses(L, CheckAccess)) 5304 return false; 5305 } 5306 return true; 5307 } 5308 }; 5309 5310 struct AAValueSimplifyArgument final : AAValueSimplifyImpl { 5311 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A) 5312 : AAValueSimplifyImpl(IRP, A) {} 5313 5314 void 
initialize(Attributor &A) override { 5315 AAValueSimplifyImpl::initialize(A); 5316 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) 5317 indicatePessimisticFixpoint(); 5318 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated, 5319 Attribute::StructRet, Attribute::Nest, Attribute::ByVal}, 5320 /* IgnoreSubsumingPositions */ true)) 5321 indicatePessimisticFixpoint(); 5322 5323 // FIXME: This is a hack to prevent us from propagating function pointers in 5324 // the new pass manager CGSCC pass as it creates call edges the 5325 // CallGraphUpdater cannot handle yet. 5326 Value &V = getAssociatedValue(); 5327 if (V.getType()->isPointerTy() && 5328 V.getType()->getPointerElementType()->isFunctionTy() && 5329 !A.isModulePass()) 5330 indicatePessimisticFixpoint(); 5331 } 5332 5333 /// See AbstractAttribute::updateImpl(...). 5334 ChangeStatus updateImpl(Attributor &A) override { 5335 // Byval is only replaceable if it is readonly; otherwise we would write into 5336 // the replaced value and not the copy that byval creates implicitly. 5337 Argument *Arg = getAssociatedArgument(); 5338 if (Arg->hasByValAttr()) { 5339 // TODO: We probably need to verify synchronization is not an issue, e.g., 5340 // there is no race by not copying a constant byval. 5341 const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), 5342 DepClassTy::REQUIRED); 5343 if (!MemAA.isAssumedReadOnly()) 5344 return indicatePessimisticFixpoint(); 5345 } 5346 5347 auto Before = SimplifiedAssociatedValue; 5348 5349 auto PredForCallSite = [&](AbstractCallSite ACS) { 5350 const IRPosition &ACSArgPos = 5351 IRPosition::callsite_argument(ACS, getCallSiteArgNo()); 5352 // Check if a corresponding argument was found or if it is one not 5353 // associated (which can happen for callback calls). 5354 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID) 5355 return false; 5356 5357 // Simplify the argument operand explicitly and check if the result is 5358 // valid in the current scope. This avoids referring to simplified values 5359 // in other functions, e.g., we don't want to say an argument in a 5360 // static function is actually an argument in a different function. 5361 bool UsedAssumedInformation = false; 5362 Optional<Constant *> SimpleArgOp = 5363 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation); 5364 if (!SimpleArgOp.hasValue()) 5365 return true; 5366 if (!SimpleArgOp.getValue()) 5367 return false; 5368 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp)) 5369 return false; 5370 return unionAssumed(*SimpleArgOp); 5371 }; 5372 5373 // Generate an answer specific to a call site context. 5374 bool Success; 5375 bool AllCallSitesKnown; 5376 if (hasCallBaseContext() && 5377 getCallBaseContext()->getCalledFunction() == Arg->getParent()) 5378 Success = PredForCallSite( 5379 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse())); 5380 else 5381 Success = A.checkForAllCallSites(PredForCallSite, *this, true, 5382 AllCallSitesKnown); 5383 5384 if (!Success) 5385 if (!askSimplifiedValueForOtherAAs(A)) 5386 return indicatePessimisticFixpoint(); 5387 5388 // If a candidate was found in this update, return CHANGED. 5389 return Before == SimplifiedAssociatedValue ?
ChangeStatus::UNCHANGED 5390 : ChangeStatus::CHANGED; 5391 } 5392 5393 /// See AbstractAttribute::trackStatistics() 5394 void trackStatistics() const override { 5395 STATS_DECLTRACK_ARG_ATTR(value_simplify) 5396 } 5397 }; 5398 5399 struct AAValueSimplifyReturned : AAValueSimplifyImpl { 5400 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A) 5401 : AAValueSimplifyImpl(IRP, A) {} 5402 5403 /// See AAValueSimplify::getAssumedSimplifiedValue() 5404 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override { 5405 if (!isValidState()) 5406 return nullptr; 5407 return SimplifiedAssociatedValue; 5408 } 5409 5410 /// See AbstractAttribute::updateImpl(...). 5411 ChangeStatus updateImpl(Attributor &A) override { 5412 auto Before = SimplifiedAssociatedValue; 5413 5414 auto PredForReturned = [&](Value &V) { 5415 return checkAndUpdate(A, *this, 5416 IRPosition::value(V, getCallBaseContext())); 5417 }; 5418 5419 if (!A.checkForAllReturnedValues(PredForReturned, *this)) 5420 if (!askSimplifiedValueForOtherAAs(A)) 5421 return indicatePessimisticFixpoint(); 5422 5423 // If a candidate was found in this update, return CHANGED. 5424 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED 5425 : ChangeStatus::CHANGED; 5426 } 5427 5428 ChangeStatus manifest(Attributor &A) override { 5429 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5430 5431 if (auto *NewV = getReplacementValue(A)) { 5432 auto PredForReturned = 5433 [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 5434 for (ReturnInst *RI : RetInsts) { 5435 Value *ReturnedVal = RI->getReturnValue(); 5436 if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal)) 5437 return true; 5438 assert(RI->getFunction() == getAnchorScope() && 5439 "ReturnInst in wrong function!"); 5440 LLVM_DEBUG(dbgs() 5441 << "[ValueSimplify] " << *ReturnedVal << " -> " 5442 << *NewV << " in " << *RI << " :: " << *this << "\n"); 5443 if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV)) 5444 Changed = ChangeStatus::CHANGED; 5445 } 5446 return true; 5447 }; 5448 A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this); 5449 } 5450 5451 return Changed | AAValueSimplify::manifest(A); 5452 } 5453 5454 /// See AbstractAttribute::trackStatistics() 5455 void trackStatistics() const override { 5456 STATS_DECLTRACK_FNRET_ATTR(value_simplify) 5457 } 5458 }; 5459 5460 struct AAValueSimplifyFloating : AAValueSimplifyImpl { 5461 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A) 5462 : AAValueSimplifyImpl(IRP, A) {} 5463 5464 /// See AbstractAttribute::initialize(...). 5465 void initialize(Attributor &A) override { 5466 AAValueSimplifyImpl::initialize(A); 5467 Value &V = getAnchorValue(); 5468 5469 // TODO: add other cases 5470 if (isa<Constant>(V)) 5471 indicatePessimisticFixpoint(); 5472 } 5473 5474 /// Check if \p Cmp is a comparison we can simplify. 5475 /// 5476 /// We handle multiple cases, one in which at least one operand is an 5477 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other 5478 /// operand. Return true if successful; in that case SimplifiedAssociatedValue 5479 /// will be updated.
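/// Example (a sketch): given `%c = icmp eq i8* %p, null` and AANonNull
/// assuming `%p` to be non-null, `%c` simplifies to `false` (to `true` for
/// `icmp ne`); the result stays optimistic while the non-null information
/// is merely assumed rather than known.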
5480 bool handleCmp(Attributor &A, CmpInst &Cmp) { 5481 auto Union = [&](Value &V) { 5482 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5483 SimplifiedAssociatedValue, &V, V.getType()); 5484 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5485 }; 5486 5487 Value *LHS = Cmp.getOperand(0); 5488 Value *RHS = Cmp.getOperand(1); 5489 5490 // Simplify the operands first. 5491 bool UsedAssumedInformation = false; 5492 const auto &SimplifiedLHS = 5493 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 5494 *this, UsedAssumedInformation); 5495 if (!SimplifiedLHS.hasValue()) 5496 return true; 5497 if (!SimplifiedLHS.getValue()) 5498 return false; 5499 LHS = *SimplifiedLHS; 5500 5501 const auto &SimplifiedRHS = 5502 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 5503 *this, UsedAssumedInformation); 5504 if (!SimplifiedRHS.hasValue()) 5505 return true; 5506 if (!SimplifiedRHS.getValue()) 5507 return false; 5508 RHS = *SimplifiedRHS; 5509 5510 LLVMContext &Ctx = Cmp.getContext(); 5511 // Handle the trivial case first in which we don't even need to think about 5512 // null or non-null. 5513 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) { 5514 Constant *NewVal = 5515 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual()); 5516 if (!Union(*NewVal)) 5517 return false; 5518 if (!UsedAssumedInformation) 5519 indicateOptimisticFixpoint(); 5520 return true; 5521 } 5522 5523 // From now on we only handle equalities (==, !=). 5524 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp); 5525 if (!ICmp || !ICmp->isEquality()) 5526 return false; 5527 5528 bool LHSIsNull = isa<ConstantPointerNull>(LHS); 5529 bool RHSIsNull = isa<ConstantPointerNull>(RHS); 5530 if (!LHSIsNull && !RHSIsNull) 5531 return false; 5532 5533 // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the 5534 // non-nullptr operand and if we assume it's non-null we can conclude the 5535 // result of the comparison. 5536 assert((LHSIsNull || RHSIsNull) && 5537 "Expected nullptr versus non-nullptr comparison at this point"); 5538 5539 // PtrIdx is the index of the operand we assume is not null. 5540 unsigned PtrIdx = LHSIsNull; 5541 auto &PtrNonNullAA = A.getAAFor<AANonNull>( 5542 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)), 5543 DepClassTy::REQUIRED); 5544 if (!PtrNonNullAA.isAssumedNonNull()) 5545 return false; 5546 UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull(); 5547 5548 // The new value depends on the predicate, true for != and false for ==. 5549 Constant *NewVal = ConstantInt::get( 5550 Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE); 5551 if (!Union(*NewVal)) 5552 return false; 5553 5554 if (!UsedAssumedInformation) 5555 indicateOptimisticFixpoint(); 5556 5557 return true; 5558 } 5559 5560 bool updateWithLoad(Attributor &A, LoadInst &L) { 5561 auto Union = [&](Value &V) { 5562 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5563 SimplifiedAssociatedValue, &V, L.getType()); 5564 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5565 }; 5566 return handleLoad(A, *this, L, Union); 5567 } 5568 5569 /// Use the generic, non-optimistic InstSimplify functionality if we managed to 5570 /// simplify any operand of the instruction \p I. Return true if successful; 5571 /// in that case SimplifiedAssociatedValue will be updated.
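/// Example (a sketch): if operand `%x` of `%a = add i32 %x, %y` is assumed
/// to simplify to `0`, InstSimplify can fold the instruction to `%y`, which
/// then becomes the simplification candidate for `%a`.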
5572 bool handleGenericInst(Attributor &A, Instruction &I) { 5573 bool SomeSimplified = false; 5574 bool UsedAssumedInformation = false; 5575 5576 SmallVector<Value *, 8> NewOps(I.getNumOperands()); 5577 int Idx = 0; 5578 for (Value *Op : I.operands()) { 5579 const auto &SimplifiedOp = 5580 A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()), 5581 *this, UsedAssumedInformation); 5582 // If we are not sure about any operand we are not sure about the entire 5583 // instruction either; we'll wait. 5584 if (!SimplifiedOp.hasValue()) 5585 return true; 5586 5587 if (SimplifiedOp.getValue()) 5588 NewOps[Idx] = SimplifiedOp.getValue(); 5589 else 5590 NewOps[Idx] = Op; 5591 5592 SomeSimplified |= (NewOps[Idx] != Op); 5593 ++Idx; 5594 } 5595 5596 // We won't bother with the InstSimplify interface if we didn't simplify any 5597 // operand ourselves. 5598 if (!SomeSimplified) 5599 return false; 5600 5601 InformationCache &InfoCache = A.getInfoCache(); 5602 Function *F = I.getFunction(); 5603 const auto *DT = 5604 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 5605 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5606 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 5607 OptimizationRemarkEmitter *ORE = nullptr; 5608 5609 const DataLayout &DL = I.getModule()->getDataLayout(); 5610 SimplifyQuery Q(DL, TLI, DT, AC, &I); 5611 if (Value *SimplifiedI = 5612 SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) { 5613 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5614 SimplifiedAssociatedValue, SimplifiedI, I.getType()); 5615 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5616 } 5617 return false; 5618 } 5619 5620 /// See AbstractAttribute::updateImpl(...). 5621 ChangeStatus updateImpl(Attributor &A) override { 5622 auto Before = SimplifiedAssociatedValue; 5623 5624 auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &, 5625 bool Stripped) -> bool { 5626 auto &AA = A.getAAFor<AAValueSimplify>( 5627 *this, IRPosition::value(V, getCallBaseContext()), 5628 DepClassTy::REQUIRED); 5629 if (!Stripped && this == &AA) { 5630 5631 if (auto *I = dyn_cast<Instruction>(&V)) { 5632 if (auto *LI = dyn_cast<LoadInst>(&V)) 5633 if (updateWithLoad(A, *LI)) 5634 return true; 5635 if (auto *Cmp = dyn_cast<CmpInst>(&V)) 5636 if (handleCmp(A, *Cmp)) 5637 return true; 5638 if (handleGenericInst(A, *I)) 5639 return true; 5640 } 5641 // TODO: Look at the instruction and check recursively. 5642 5643 LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V 5644 << "\n"); 5645 return false; 5646 } 5647 return checkAndUpdate(A, *this, 5648 IRPosition::value(V, getCallBaseContext())); 5649 }; 5650 5651 bool Dummy = false; 5652 if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy, 5653 VisitValueCB, getCtxI(), 5654 /* UseValueSimplify */ false)) 5655 if (!askSimplifiedValueForOtherAAs(A)) 5656 return indicatePessimisticFixpoint(); 5657 5658 // If a candidate was found in this update, return CHANGED. 5659 return Before == SimplifiedAssociatedValue ?
ChangeStatus::UNCHANGED 5660 : ChangeStatus::CHANGED; 5661 } 5662 5663 /// See AbstractAttribute::trackStatistics() 5664 void trackStatistics() const override { 5665 STATS_DECLTRACK_FLOATING_ATTR(value_simplify) 5666 } 5667 }; 5668 5669 struct AAValueSimplifyFunction : AAValueSimplifyImpl { 5670 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A) 5671 : AAValueSimplifyImpl(IRP, A) {} 5672 5673 /// See AbstractAttribute::initialize(...). 5674 void initialize(Attributor &A) override { 5675 SimplifiedAssociatedValue = nullptr; 5676 indicateOptimisticFixpoint(); 5677 } 5678 /// See AbstractAttribute::updateImpl(...). 5679 ChangeStatus updateImpl(Attributor &A) override { 5680 llvm_unreachable( 5681 "AAValueSimplify(Function|CallSite)::updateImpl will not be called"); 5682 } 5683 /// See AbstractAttribute::trackStatistics() 5684 void trackStatistics() const override { 5685 STATS_DECLTRACK_FN_ATTR(value_simplify) 5686 } 5687 }; 5688 5689 struct AAValueSimplifyCallSite : AAValueSimplifyFunction { 5690 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A) 5691 : AAValueSimplifyFunction(IRP, A) {} 5692 /// See AbstractAttribute::trackStatistics() 5693 void trackStatistics() const override { 5694 STATS_DECLTRACK_CS_ATTR(value_simplify) 5695 } 5696 }; 5697 5698 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl { 5699 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A) 5700 : AAValueSimplifyImpl(IRP, A) {} 5701 5702 void initialize(Attributor &A) override { 5703 AAValueSimplifyImpl::initialize(A); 5704 if (!getAssociatedFunction()) 5705 indicatePessimisticFixpoint(); 5706 } 5707 5708 /// See AbstractAttribute::updateImpl(...). 5709 ChangeStatus updateImpl(Attributor &A) override { 5710 auto Before = SimplifiedAssociatedValue; 5711 auto &RetAA = A.getAAFor<AAReturnedValues>( 5712 *this, IRPosition::function(*getAssociatedFunction()), 5713 DepClassTy::REQUIRED); 5714 auto PredForReturned = 5715 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) { 5716 bool UsedAssumedInformation = false; 5717 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent( 5718 &RetVal, *cast<CallBase>(getCtxI()), *this, 5719 UsedAssumedInformation); 5720 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice( 5721 SimplifiedAssociatedValue, CSRetVal, getAssociatedType()); 5722 return SimplifiedAssociatedValue != Optional<Value *>(nullptr); 5723 }; 5724 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned)) 5725 if (!askSimplifiedValueForOtherAAs(A)) 5726 return indicatePessimisticFixpoint(); 5727 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED 5728 : ChangeStatus::CHANGED; 5729 } 5730 5731 void trackStatistics() const override { 5732 STATS_DECLTRACK_CSRET_ATTR(value_simplify) 5733 } 5734 }; 5735 5736 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating { 5737 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A) 5738 : AAValueSimplifyFloating(IRP, A) {} 5739 5740 /// See AbstractAttribute::manifest(...).
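/// A sketch of the effect: if a replacement value is found for this call
/// site argument, the corresponding argument operand use of the underlying
/// call base is rewritten to it in manifest below.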
5741 ChangeStatus manifest(Attributor &A) override { 5742 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5743 5744 if (auto *NewV = getReplacementValue(A)) { 5745 Use &U = cast<CallBase>(&getAnchorValue()) 5746 ->getArgOperandUse(getCallSiteArgNo()); 5747 if (A.changeUseAfterManifest(U, *NewV)) 5748 Changed = ChangeStatus::CHANGED; 5749 } 5750 5751 return Changed | AAValueSimplify::manifest(A); 5752 } 5753 5754 void trackStatistics() const override { 5755 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 5756 } 5757 }; 5758 5759 /// ----------------------- Heap-To-Stack Conversion --------------------------- 5760 struct AAHeapToStackFunction final : public AAHeapToStack { 5761 5762 struct AllocationInfo { 5763 /// The call that allocates the memory. 5764 CallBase *const CB; 5765 5766 /// The kind of allocation. 5767 const enum class AllocationKind { 5768 MALLOC, 5769 CALLOC, 5770 ALIGNED_ALLOC, 5771 } Kind; 5772 5773 /// The library function id for the allocation. 5774 LibFunc LibraryFunctionId = NotLibFunc; 5775 5776 /// The status wrt. a rewrite. 5777 enum { 5778 STACK_DUE_TO_USE, 5779 STACK_DUE_TO_FREE, 5780 INVALID, 5781 } Status = STACK_DUE_TO_USE; 5782 5783 /// Flag to indicate if we encountered a use that might free this allocation 5784 /// but which is not in the deallocation infos. 5785 bool HasPotentiallyFreeingUnknownUses = false; 5786 5787 /// The set of free calls that use this allocation. 5788 SmallPtrSet<CallBase *, 1> PotentialFreeCalls{}; 5789 }; 5790 5791 struct DeallocationInfo { 5792 /// The call that deallocates the memory. 5793 CallBase *const CB; 5794 5795 /// Flag to indicate if we don't know all objects this deallocation might 5796 /// free. 5797 bool MightFreeUnknownObjects = false; 5798 5799 /// The set of allocation calls that are potentially freed. 5800 SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{}; 5801 }; 5802 5803 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5804 : AAHeapToStack(IRP, A) {} 5805 5806 ~AAHeapToStackFunction() { 5807 // Ensure we call the destructor so we release any memory allocated in the 5808 // sets. 5809 for (auto &It : AllocationInfos) 5810 It.getSecond()->~AllocationInfo(); 5811 for (auto &It : DeallocationInfos) 5812 It.getSecond()->~DeallocationInfo(); 5813 } 5814 5815 void initialize(Attributor &A) override { 5816 AAHeapToStack::initialize(A); 5817 5818 const Function *F = getAnchorScope(); 5819 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5820 5821 auto AllocationIdentifierCB = [&](Instruction &I) { 5822 CallBase *CB = dyn_cast<CallBase>(&I); 5823 if (!CB) 5824 return true; 5825 if (isFreeCall(CB, TLI)) { 5826 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 5827 return true; 5828 } 5829 bool IsMalloc = isMallocLikeFn(CB, TLI); 5830 bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI); 5831 bool IsCalloc = 5832 !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI); 5833 if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) 5834 return true; 5835 auto Kind = 5836 IsMalloc ? AllocationInfo::AllocationKind::MALLOC 5837 : (IsCalloc ? 
AllocationInfo::AllocationKind::CALLOC 5838 : AllocationInfo::AllocationKind::ALIGNED_ALLOC); 5839 5840 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind}; 5841 AllocationInfos[CB] = AI; 5842 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 5843 return true; 5844 }; 5845 5846 bool UsedAssumedInformation = false; 5847 bool Success = A.checkForAllCallLikeInstructions( 5848 AllocationIdentifierCB, *this, UsedAssumedInformation, 5849 /* CheckBBLivenessOnly */ false, 5850 /* CheckPotentiallyDead */ true); 5851 (void)Success; 5852 assert(Success && "Did not expect the call base visit callback to fail!"); 5853 } 5854 5855 const std::string getAsStr() const override { 5856 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 5857 for (const auto &It : AllocationInfos) { 5858 if (It.second->Status == AllocationInfo::INVALID) 5859 ++NumInvalidMallocs; 5860 else 5861 ++NumH2SMallocs; 5862 } 5863 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 5864 std::to_string(NumInvalidMallocs); 5865 } 5866 5867 /// See AbstractAttribute::trackStatistics(). 5868 void trackStatistics() const override { 5869 STATS_DECL( 5870 MallocCalls, Function, 5871 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5872 for (auto &It : AllocationInfos) 5873 if (It.second->Status != AllocationInfo::INVALID) 5874 ++BUILD_STAT_NAME(MallocCalls, Function); 5875 } 5876 5877 bool isAssumedHeapToStack(const CallBase &CB) const override { 5878 if (isValidState()) 5879 if (AllocationInfo *AI = AllocationInfos.lookup(&CB)) 5880 return AI->Status != AllocationInfo::INVALID; 5881 return false; 5882 } 5883 5884 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 5885 if (!isValidState()) 5886 return false; 5887 5888 for (auto &It : AllocationInfos) { 5889 AllocationInfo &AI = *It.second; 5890 if (AI.Status == AllocationInfo::INVALID) 5891 continue; 5892 5893 if (AI.PotentialFreeCalls.count(&CB)) 5894 return true; 5895 } 5896 5897 return false; 5898 } 5899 5900 ChangeStatus manifest(Attributor &A) override { 5901 assert(getState().isValidState() && 5902 "Attempted to manifest an invalid state!"); 5903 5904 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 5905 Function *F = getAnchorScope(); 5906 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5907 5908 for (auto &It : AllocationInfos) { 5909 AllocationInfo &AI = *It.second; 5910 if (AI.Status == AllocationInfo::INVALID) 5911 continue; 5912 5913 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 5914 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 5915 A.deleteAfterManifest(*FreeCall); 5916 HasChanged = ChangeStatus::CHANGED; 5917 } 5918 5919 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 5920 << "\n"); 5921 5922 auto Remark = [&](OptimizationRemark OR) { 5923 LibFunc IsAllocShared; 5924 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 5925 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 5926 return OR << "Moving globalized variable to the stack."; 5927 return OR << "Moving memory allocation from the heap to the stack."; 5928 }; 5929 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 5930 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 5931 else 5932 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 5933 5934 Value *Size; 5935 Optional<APInt> SizeAPI = getSize(A, *this, AI); 5936 if (SizeAPI.hasValue()) { 5937 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 5938 } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) { 
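// getSize() could not fold the calloc size to a constant, so the byte size
// of the replacement alloca is computed at runtime as `num * size` (the
// first two calloc operands).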
5939 auto *Num = AI.CB->getOperand(0); 5940 auto *SizeT = AI.CB->getOperand(1); 5941 IRBuilder<> B(AI.CB); 5942 Size = B.CreateMul(Num, SizeT, "h2s.calloc.size"); 5943 } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) { 5944 Size = AI.CB->getOperand(1); 5945 } else { 5946 Size = AI.CB->getOperand(0); 5947 } 5948 5949 Align Alignment(1); 5950 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 5951 Alignment = max(Alignment, RetAlign); 5952 if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) { 5953 Optional<APInt> AlignmentAPI = 5954 getAPInt(A, *this, *AI.CB->getArgOperand(0)); 5955 assert(AlignmentAPI.hasValue() && 5956 "Expected an alignment during manifest!"); 5957 Alignment = 5958 max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue())); 5959 } 5960 5961 unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace(); 5962 Instruction *Alloca = 5963 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 5964 "", AI.CB->getNextNode()); 5965 5966 if (Alloca->getType() != AI.CB->getType()) 5967 Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc", 5968 Alloca->getNextNode()); 5969 5970 A.changeValueAfterManifest(*AI.CB, *Alloca); 5971 5972 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 5973 auto *NBB = II->getNormalDest(); 5974 BranchInst::Create(NBB, AI.CB->getParent()); 5975 A.deleteAfterManifest(*AI.CB); 5976 } else { 5977 A.deleteAfterManifest(*AI.CB); 5978 } 5979 5980 // Zero out the allocated memory if it was a calloc. 5981 if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) { 5982 auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc", 5983 Alloca->getNextNode()); 5984 Value *Ops[] = { 5985 BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size, 5986 ConstantInt::get(Type::getInt1Ty(F->getContext()), false)}; 5987 5988 Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()}; 5989 Module *M = F->getParent(); 5990 Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys); 5991 CallInst::Create(Fn, Ops, "", BI->getNextNode()); 5992 } 5993 HasChanged = ChangeStatus::CHANGED; 5994 } 5995 5996 return HasChanged; 5997 } 5998 5999 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6000 Value &V) { 6001 bool UsedAssumedInformation = false; 6002 Optional<Constant *> SimpleV = 6003 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6004 if (!SimpleV.hasValue()) 6005 return APInt(64, 0); 6006 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue())) 6007 return CI->getValue(); 6008 return llvm::None; 6009 } 6010 6011 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6012 AllocationInfo &AI) { 6013 6014 if (AI.Kind == AllocationInfo::AllocationKind::MALLOC) 6015 return getAPInt(A, AA, *AI.CB->getArgOperand(0)); 6016 6017 if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) 6018 // Only if the alignment is also constant we return a size. 6019 return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue() 6020 ? getAPInt(A, AA, *AI.CB->getArgOperand(1)) 6021 : llvm::None; 6022 6023 assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC && 6024 "Expected only callocs are left"); 6025 Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0)); 6026 Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1)); 6027 if (!Num.hasValue() || !Size.hasValue()) 6028 return llvm::None; 6029 bool Overflow = false; 6030 Size = Size.getValue().umul_ov(Num.getValue(), Overflow); 6031 return Overflow ? 
llvm::None : Size; 6032 } 6033 6034 /// Collection of all malloc-like calls in a function with associated 6035 /// information. 6036 DenseMap<CallBase *, AllocationInfo *> AllocationInfos; 6037 6038 /// Collection of all free-like calls in a function with associated 6039 /// information. 6040 DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos; 6041 6042 ChangeStatus updateImpl(Attributor &A) override; 6043 }; 6044 6045 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { 6046 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6047 const Function *F = getAnchorScope(); 6048 6049 const auto &LivenessAA = 6050 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); 6051 6052 MustBeExecutedContextExplorer &Explorer = 6053 A.getInfoCache().getMustBeExecutedContextExplorer(); 6054 6055 bool StackIsAccessibleByOtherThreads = 6056 A.getInfoCache().stackIsAccessibleByOtherThreads(); 6057 6058 // Flag to ensure we update our deallocation information at most once per 6059 // updateImpl call and only if we use the free check reasoning. 6060 bool HasUpdatedFrees = false; 6061 6062 auto UpdateFrees = [&]() { 6063 HasUpdatedFrees = true; 6064 6065 for (auto &It : DeallocationInfos) { 6066 DeallocationInfo &DI = *It.second; 6067 // For now we cannot use deallocations that have unknown inputs, skip 6068 // them. 6069 if (DI.MightFreeUnknownObjects) 6070 continue; 6071 6072 // No need to analyze dead calls, ignore them instead. 6073 bool UsedAssumedInformation = false; 6074 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation, 6075 /* CheckBBLivenessOnly */ true)) 6076 continue; 6077 6078 // Use the optimistic version to get the freed objects, ignoring dead 6079 // branches etc. 6080 SmallVector<Value *, 8> Objects; 6081 if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects, 6082 *this, DI.CB)) { 6083 LLVM_DEBUG( 6084 dbgs() 6085 << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n"); 6086 DI.MightFreeUnknownObjects = true; 6087 continue; 6088 } 6089 6090 // Check each object explicitly. 6091 for (auto *Obj : Objects) { 6092 // Free of null and undef can be ignored as no-ops (or UB in the latter 6093 // case). 6094 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj)) 6095 continue; 6096 6097 CallBase *ObjCB = dyn_cast<CallBase>(Obj); 6098 if (!ObjCB) { 6099 LLVM_DEBUG(dbgs() 6100 << "[H2S] Free of a non-call object: " << *Obj << "\n"); 6101 DI.MightFreeUnknownObjects = true; 6102 continue; 6103 } 6104 6105 AllocationInfo *AI = AllocationInfos.lookup(ObjCB); 6106 if (!AI) { 6107 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj 6108 << "\n"); 6109 DI.MightFreeUnknownObjects = true; 6110 continue; 6111 } 6112 6113 DI.PotentialAllocationCalls.insert(ObjCB); 6114 } 6115 } 6116 }; 6117 6118 auto FreeCheck = [&](AllocationInfo &AI) { 6119 // If the stack is not accessible by other threads, the "must-free" logic 6120 // doesn't apply as the pointer could be shared and needs to be placed in 6121 // "shareable" memory. 6122 if (!StackIsAccessibleByOtherThreads) { 6123 auto &NoSyncAA = 6124 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL); 6125 if (!NoSyncAA.isAssumedNoSync()) { 6126 LLVM_DEBUG( 6127 dbgs() << "[H2S] found an escaping use, stack is not accessible by " 6128 "other threads and function is not nosync:\n"); 6129 return false; 6130 } 6131 } 6132 if (!HasUpdatedFrees) 6133 UpdateFrees(); 6134 6135 // TODO: Allow multi-exit functions that have different free calls.
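// In short, the checks below require a single free call that is a known
// deallocation, frees only this allocation, and is guaranteed to execute
// whenever the allocation is.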
6136 if (AI.PotentialFreeCalls.size() != 1) { 6137 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but " 6138 << AI.PotentialFreeCalls.size() << "\n"); 6139 return false; 6140 } 6141 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin(); 6142 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree); 6143 if (!DI) { 6144 LLVM_DEBUG( 6145 dbgs() << "[H2S] unique free call was not known as deallocation call " 6146 << *UniqueFree << "\n"); 6147 return false; 6148 } 6149 if (DI->MightFreeUnknownObjects) { 6150 LLVM_DEBUG( 6151 dbgs() << "[H2S] unique free call might free unknown allocations\n"); 6152 return false; 6153 } 6154 if (DI->PotentialAllocationCalls.size() > 1) { 6155 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free " 6156 << DI->PotentialAllocationCalls.size() 6157 << " different allocations\n"); 6158 return false; 6159 } 6160 if (*DI->PotentialAllocationCalls.begin() != AI.CB) { 6161 LLVM_DEBUG( 6162 dbgs() 6163 << "[H2S] unique free call not known to free this allocation but " 6164 << **DI->PotentialAllocationCalls.begin() << "\n"); 6165 return false; 6166 } 6167 Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode(); 6168 if (!Explorer.findInContextOf(UniqueFree, CtxI)) { 6169 LLVM_DEBUG( 6170 dbgs() 6171 << "[H2S] unique free call might not be executed with the allocation " 6172 << *UniqueFree << "\n"); 6173 return false; 6174 } 6175 return true; 6176 }; 6177 6178 auto UsesCheck = [&](AllocationInfo &AI) { 6179 bool ValidUsesOnly = true; 6180 6181 auto Pred = [&](const Use &U, bool &Follow) -> bool { 6182 Instruction *UserI = cast<Instruction>(U.getUser()); 6183 if (isa<LoadInst>(UserI)) 6184 return true; 6185 if (auto *SI = dyn_cast<StoreInst>(UserI)) { 6186 if (SI->getValueOperand() == U.get()) { 6187 LLVM_DEBUG(dbgs() 6188 << "[H2S] escaping store to memory: " << *UserI << "\n"); 6189 ValidUsesOnly = false; 6190 } else { 6191 // A store into the malloc'ed memory is fine. 6192 } 6193 return true; 6194 } 6195 if (auto *CB = dyn_cast<CallBase>(UserI)) { 6196 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd()) 6197 return true; 6198 if (DeallocationInfos.count(CB)) { 6199 AI.PotentialFreeCalls.insert(CB); 6200 return true; 6201 } 6202 6203 unsigned ArgNo = CB->getArgOperandNo(&U); 6204 6205 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 6206 *this, IRPosition::callsite_argument(*CB, ArgNo), 6207 DepClassTy::OPTIONAL); 6208 6209 // If a call site argument use is nofree, we are fine. 6210 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>( 6211 *this, IRPosition::callsite_argument(*CB, ArgNo), 6212 DepClassTy::OPTIONAL); 6213 6214 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture(); 6215 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree(); 6216 if (MaybeCaptured || 6217 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared && 6218 MaybeFreed)) { 6219 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed; 6220 6221 // Emit a missed remark if this is missed OpenMP globalization. 6222 auto Remark = [&](OptimizationRemarkMissed ORM) { 6223 return ORM 6224 << "Could not move globalized variable to the stack. " 6225 "Variable is potentially captured in call. 
Mark " 6226 "parameter as `__attribute__((noescape))` to override."; 6227 }; 6228 6229 if (ValidUsesOnly && 6230 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 6231 A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark); 6232 6233 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n"); 6234 ValidUsesOnly = false; 6235 } 6236 return true; 6237 } 6238 6239 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 6240 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 6241 Follow = true; 6242 return true; 6243 } 6244 // Unknown user for which we can not track uses further (in a way that 6245 // makes sense). 6246 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n"); 6247 ValidUsesOnly = false; 6248 return true; 6249 }; 6250 if (!A.checkForAllUses(Pred, *this, *AI.CB)) 6251 return false; 6252 return ValidUsesOnly; 6253 }; 6254 6255 // The actual update starts here. We look at all allocations and depending on 6256 // their status perform the appropriate check(s). 6257 for (auto &It : AllocationInfos) { 6258 AllocationInfo &AI = *It.second; 6259 if (AI.Status == AllocationInfo::INVALID) 6260 continue; 6261 6262 if (MaxHeapToStackSize == -1) { 6263 if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) 6264 if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) { 6265 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB 6266 << "\n"); 6267 AI.Status = AllocationInfo::INVALID; 6268 Changed = ChangeStatus::CHANGED; 6269 continue; 6270 } 6271 } else { 6272 Optional<APInt> Size = getSize(A, *this, AI); 6273 if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) { 6274 LLVM_DEBUG({ 6275 if (!Size.hasValue()) 6276 dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB 6277 << "\n"; 6278 else 6279 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. " 6280 << MaxHeapToStackSize << "\n"; 6281 }); 6282 6283 AI.Status = AllocationInfo::INVALID; 6284 Changed = ChangeStatus::CHANGED; 6285 continue; 6286 } 6287 } 6288 6289 switch (AI.Status) { 6290 case AllocationInfo::STACK_DUE_TO_USE: 6291 if (UsesCheck(AI)) 6292 continue; 6293 AI.Status = AllocationInfo::STACK_DUE_TO_FREE; 6294 LLVM_FALLTHROUGH; 6295 case AllocationInfo::STACK_DUE_TO_FREE: 6296 if (FreeCheck(AI)) 6297 continue; 6298 AI.Status = AllocationInfo::INVALID; 6299 Changed = ChangeStatus::CHANGED; 6300 continue; 6301 case AllocationInfo::INVALID: 6302 llvm_unreachable("Invalid allocations should never reach this point!"); 6303 }; 6304 } 6305 6306 return Changed; 6307 } 6308 6309 /// ----------------------- Privatizable Pointers ------------------------------ 6310 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr { 6311 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A) 6312 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {} 6313 6314 ChangeStatus indicatePessimisticFixpoint() override { 6315 AAPrivatizablePtr::indicatePessimisticFixpoint(); 6316 PrivatizableType = nullptr; 6317 return ChangeStatus::CHANGED; 6318 } 6319 6320 /// Identify the type we can chose for a private copy of the underlying 6321 /// argument. None means it is not clear yet, nullptr means there is none. 6322 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0; 6323 6324 /// Return a privatizable type that encloses both T0 and T1. 6325 /// TODO: This is merely a stub for now as we should manage a mapping as well. 
6326 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6327 if (!T0.hasValue())
6328 return T1;
6329 if (!T1.hasValue())
6330 return T0;
6331 if (T0 == T1)
6332 return T0;
6333 return nullptr;
6334 }
6335
6336 Optional<Type *> getPrivatizableType() const override {
6337 return PrivatizableType;
6338 }
6339
6340 const std::string getAsStr() const override {
6341 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6342 }
6343
6344 protected:
6345 Optional<Type *> PrivatizableType;
6346 };
6347
6348 // TODO: Do this for call site arguments (probably also other values) as well.
6349
6350 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6351 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6352 : AAPrivatizablePtrImpl(IRP, A) {}
6353
6354 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6355 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6356 // If this is a byval argument and we know all the call sites (so we can
6357 // rewrite them), there is no need to check them explicitly.
6358 bool AllCallSitesKnown;
6359 if (getIRPosition().hasAttr(Attribute::ByVal) &&
6360 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6361 true, AllCallSitesKnown))
6362 return getAssociatedValue().getType()->getPointerElementType();
6363
6364 Optional<Type *> Ty;
6365 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6366
6367 // Make sure the associated call site argument has the same type at all call
6368 // sites and it is an allocation we know is safe to privatize; for now that
6369 // means we only allow alloca instructions.
6370 // TODO: We can additionally analyze the accesses in the callee to create
6371 // the type from that information instead. That is a little more
6372 // involved and will be done in a follow up patch.
6373 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6374 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6375 // Check if a corresponding argument was found or if it is one not
6376 // associated (which can happen for callback calls).
6377 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6378 return false;
6379
6380 // Check that all call sites agree on a type.
6381 auto &PrivCSArgAA =
6382 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6383 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6384
6385 LLVM_DEBUG({
6386 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6387 if (CSTy.hasValue() && CSTy.getValue())
6388 CSTy.getValue()->print(dbgs());
6389 else if (CSTy.hasValue())
6390 dbgs() << "<nullptr>";
6391 else
6392 dbgs() << "<none>";
6393 });
6394
6395 Ty = combineTypes(Ty, CSTy);
6396
6397 LLVM_DEBUG({
6398 dbgs() << " : New Type: ";
6399 if (Ty.hasValue() && Ty.getValue())
6400 Ty.getValue()->print(dbgs());
6401 else if (Ty.hasValue())
6402 dbgs() << "<nullptr>";
6403 else
6404 dbgs() << "<none>";
6405 dbgs() << "\n";
6406 });
6407
6408 return !Ty.hasValue() || Ty.getValue();
6409 };
6410
6411 if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6412 return nullptr;
6413 return Ty;
6414 }
6415
6416 /// See AbstractAttribute::updateImpl(...).
6417 ChangeStatus updateImpl(Attributor &A) override {
6418 PrivatizableType = identifyPrivatizableType(A);
6419 if (!PrivatizableType.hasValue())
6420 return ChangeStatus::UNCHANGED;
6421 if (!PrivatizableType.getValue())
6422 return indicatePessimisticFixpoint();
6423
6424 // The dependence is optional so that we do not give up on this AA once we
6425 // give up on the alignment.
6426 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6427 DepClassTy::OPTIONAL);
6428
6429 // Avoid arguments with padding for now.
6430 if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6431 !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6432 A.getInfoCache().getDL())) {
6433 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6434 return indicatePessimisticFixpoint();
6435 }
6436
6437 // Collect the types that will replace the privatizable type in the function
6438 // signature.
6439 SmallVector<Type *, 16> ReplacementTypes;
6440 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6441
6442 // Verify callee and caller agree on how the promoted argument would be
6443 // passed.
6444 Function &Fn = *getIRPosition().getAnchorScope();
6445 const auto *TTI =
6446 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6447 if (!TTI) {
6448 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6449 << Fn.getName() << "\n");
6450 return indicatePessimisticFixpoint();
6451 }
6452
6453 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6454 CallBase *CB = ACS.getInstruction();
6455 return TTI->areTypesABICompatible(
6456 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6457 };
6458 bool AllCallSitesKnown;
6459 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6460 AllCallSitesKnown)) {
6461 LLVM_DEBUG(
6462 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6463 << Fn.getName() << "\n");
6464 return indicatePessimisticFixpoint();
6465 }
6466
6467 // Register a rewrite of the argument.
6468 Argument *Arg = getAssociatedArgument();
6469 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6470 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6471 return indicatePessimisticFixpoint();
6472 }
6473
6474 unsigned ArgNo = Arg->getArgNo();
6475
6476 // Helper to check whether, for the given call site, the associated argument
6477 // is passed to a callback where the privatization would be different.
6478 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6479 SmallVector<const Use *, 4> CallbackUses;
6480 AbstractCallSite::getCallbackUses(CB, CallbackUses);
6481 for (const Use *U : CallbackUses) {
6482 AbstractCallSite CBACS(U);
6483 assert(CBACS && CBACS.isCallbackCall());
6484 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6485 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6486
6487 LLVM_DEBUG({
6488 dbgs()
6489 << "[AAPrivatizablePtr] Argument " << *Arg
6490 << " check if it can be privatized in the context of its parent ("
6491 << Arg->getParent()->getName()
6492 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6493 "callback ("
6494 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6495 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6496 << CBACS.getCallArgOperand(CBArg) << " vs "
6497 << CB.getArgOperand(ArgNo) << "\n"
6498 << "[AAPrivatizablePtr] " << CBArg << " : "
6499 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6500 });
6501
6502 if (CBArgNo != int(ArgNo))
6503 continue;
6504 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6505 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6506 if (CBArgPrivAA.isValidState()) {
6507 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6508 if (!CBArgPrivTy.hasValue())
6509 continue;
6510 if (CBArgPrivTy.getValue() == PrivatizableType)
6511 continue;
6512 }
6513
6514 LLVM_DEBUG({
6515 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6516 << " cannot be privatized in the context of its parent ("
6517 << Arg->getParent()->getName()
6518 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6519 "callback ("
6520 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6521 << ").\n[AAPrivatizablePtr] for which the argument "
6522 "privatization is not compatible.\n";
6523 });
6524 return false;
6525 }
6526 }
6527 return true;
6528 };
6529
6530 // Helper to check whether, for the given call site, the associated argument
6531 // is passed to a direct call where the privatization would be different.
6532 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6533 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6534 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6535 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6536 "Expected a direct call operand for callback call operand");
6537
6538 LLVM_DEBUG({
6539 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6540 << " check if it can be privatized in the context of its parent ("
6541 << Arg->getParent()->getName()
6542 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6543 "direct call of ("
6544 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6545 << ").\n";
6546 });
6547
6548 Function *DCCallee = DC->getCalledFunction();
6549 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6550 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6551 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6552 DepClassTy::REQUIRED);
6553 if (DCArgPrivAA.isValidState()) {
6554 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6555 if (!DCArgPrivTy.hasValue())
6556 return true;
6557 if (DCArgPrivTy.getValue() == PrivatizableType)
6558 return true;
6559 }
6560 }
6561
6562 LLVM_DEBUG({
6563 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6564 << " cannot be privatized in the context of its parent ("
6565 << Arg->getParent()->getName()
6566 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6567 "direct call of ("
6568 << ACS.getInstruction()->getCalledFunction()->getName()
6569 << ").\n[AAPrivatizablePtr] for which the argument "
6570 "privatization is not compatible.\n";
6571 });
6572 return false;
6573 };
6574
6575 // Helper to check if the associated argument is used at the given abstract
6576 // call site in a way that is incompatible with the privatization assumed
6577 // here.
6578 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6579 if (ACS.isDirectCall())
6580 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6581 if (ACS.isCallbackCall())
6582 return IsCompatiblePrivArgOfDirectCS(ACS);
6583 return false;
6584 };
6585
6586 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6587 AllCallSitesKnown))
6588 return indicatePessimisticFixpoint();
6589
6590 return ChangeStatus::UNCHANGED;
6591 }
6592
6593 /// Given a type to privatize \p PrivType, collect the constituent types
6594 /// (which are used) in \p ReplacementTypes.
6595 static void
6596 identifyReplacementTypes(Type *PrivType,
6597 SmallVectorImpl<Type *> &ReplacementTypes) {
6598 // TODO: For now we expand the privatization type to the fullest which can
6599 // lead to dead arguments that need to be removed later.
6600 assert(PrivType && "Expected privatizable type!");
6601
6602 // Traverse the type, extract constituent types on the outermost level.
6603 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6604 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6605 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6606 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6607 ReplacementTypes.append(PrivArrayType->getNumElements(),
6608 PrivArrayType->getElementType());
6609 } else {
6610 ReplacementTypes.push_back(PrivType);
6611 }
6612 }
6613
6614 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6615 /// The values needed are taken from the arguments of \p F starting at
6616 /// position \p ArgNo.
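/// As an illustrative sketch (actual offsets depend on the DataLayout):
/// for \p PrivType == { i32, i64 } and \p ArgNo == 0, this emits roughly
///   %elt0 = <GEP to byte offset 0 of Base>
///   store i32 %arg0, i32* %elt0
///   %elt1 = <GEP to byte offset of element 1 of Base>
///   store i64 %arg1, i64* %elt1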
6617 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6618 unsigned ArgNo, Instruction &IP) { 6619 assert(PrivType && "Expected privatizable type!"); 6620 6621 IRBuilder<NoFolder> IRB(&IP); 6622 const DataLayout &DL = F.getParent()->getDataLayout(); 6623 6624 // Traverse the type, build GEPs and stores. 6625 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6626 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6627 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6628 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6629 Value *Ptr = 6630 constructPointer(PointeeTy, PrivType, &Base, 6631 PrivStructLayout->getElementOffset(u), IRB, DL); 6632 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6633 } 6634 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6635 Type *PointeeTy = PrivArrayType->getElementType(); 6636 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6637 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6638 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6639 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6640 u * PointeeTySize, IRB, DL); 6641 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6642 } 6643 } else { 6644 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6645 } 6646 } 6647 6648 /// Extract values from \p Base according to the type \p PrivType at the 6649 /// call position \p ACS. The values are appended to \p ReplacementValues. 6650 void createReplacementValues(Align Alignment, Type *PrivType, 6651 AbstractCallSite ACS, Value *Base, 6652 SmallVectorImpl<Value *> &ReplacementValues) { 6653 assert(Base && "Expected base value!"); 6654 assert(PrivType && "Expected privatizable type!"); 6655 Instruction *IP = ACS.getInstruction(); 6656 6657 IRBuilder<NoFolder> IRB(IP); 6658 const DataLayout &DL = IP->getModule()->getDataLayout(); 6659 6660 if (Base->getType()->getPointerElementType() != PrivType) 6661 Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(), 6662 "", ACS.getInstruction()); 6663 6664 // Traverse the type, build GEPs and loads. 6665 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6666 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6667 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6668 Type *PointeeTy = PrivStructType->getElementType(u); 6669 Value *Ptr = 6670 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6671 PrivStructLayout->getElementOffset(u), IRB, DL); 6672 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6673 L->setAlignment(Alignment); 6674 ReplacementValues.push_back(L); 6675 } 6676 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6677 Type *PointeeTy = PrivArrayType->getElementType(); 6678 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6679 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6680 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6681 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6682 u * PointeeTySize, IRB, DL); 6683 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6684 L->setAlignment(Alignment); 6685 ReplacementValues.push_back(L); 6686 } 6687 } else { 6688 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6689 L->setAlignment(Alignment); 6690 ReplacementValues.push_back(L); 6691 } 6692 } 6693 6694 /// See AbstractAttribute::manifest(...) 
6695 ChangeStatus manifest(Attributor &A) override { 6696 if (!PrivatizableType.hasValue()) 6697 return ChangeStatus::UNCHANGED; 6698 assert(PrivatizableType.getValue() && "Expected privatizable type!"); 6699 6700 // Collect all tail calls in the function as we cannot allow new allocas to 6701 // escape into tail recursion. 6702 // TODO: Be smarter about new allocas escaping into tail calls. 6703 SmallVector<CallInst *, 16> TailCalls; 6704 bool UsedAssumedInformation = false; 6705 if (!A.checkForAllInstructions( 6706 [&](Instruction &I) { 6707 CallInst &CI = cast<CallInst>(I); 6708 if (CI.isTailCall()) 6709 TailCalls.push_back(&CI); 6710 return true; 6711 }, 6712 *this, {Instruction::Call}, UsedAssumedInformation)) 6713 return ChangeStatus::UNCHANGED; 6714 6715 Argument *Arg = getAssociatedArgument(); 6716 // Query AAAlign attribute for alignment of associated argument to 6717 // determine the best alignment of loads. 6718 const auto &AlignAA = 6719 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6720 6721 // Callback to repair the associated function. A new alloca is placed at the 6722 // beginning and initialized with the values passed through arguments. The 6723 // new alloca replaces the use of the old pointer argument. 6724 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6725 [=](const Attributor::ArgumentReplacementInfo &ARI, 6726 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6727 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6728 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6729 Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0, 6730 Arg->getName() + ".priv", IP); 6731 createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn, 6732 ArgIt->getArgNo(), *IP); 6733 6734 if (AI->getType() != Arg->getType()) 6735 AI = 6736 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP); 6737 Arg->replaceAllUsesWith(AI); 6738 6739 for (CallInst *CI : TailCalls) 6740 CI->setTailCall(false); 6741 }; 6742 6743 // Callback to repair a call site of the associated function. The elements 6744 // of the privatizable type are loaded prior to the call and passed to the 6745 // new function version. 6746 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6747 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6748 AbstractCallSite ACS, 6749 SmallVectorImpl<Value *> &NewArgOperands) { 6750 // When no alignment is specified for the load instruction, 6751 // natural alignment is assumed. 6752 createReplacementValues( 6753 assumeAligned(AlignAA.getAssumedAlign()), 6754 PrivatizableType.getValue(), ACS, 6755 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6756 NewArgOperands); 6757 }; 6758 6759 // Collect the types that will replace the privatizable type in the function 6760 // signature. 6761 SmallVector<Type *, 16> ReplacementTypes; 6762 identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes); 6763 6764 // Register a rewrite of the argument. 
6765 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6766 std::move(FnRepairCB), 6767 std::move(ACSRepairCB))) 6768 return ChangeStatus::CHANGED; 6769 return ChangeStatus::UNCHANGED; 6770 } 6771 6772 /// See AbstractAttribute::trackStatistics() 6773 void trackStatistics() const override { 6774 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6775 } 6776 }; 6777 6778 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6779 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6780 : AAPrivatizablePtrImpl(IRP, A) {} 6781 6782 /// See AbstractAttribute::initialize(...). 6783 virtual void initialize(Attributor &A) override { 6784 // TODO: We can privatize more than arguments. 6785 indicatePessimisticFixpoint(); 6786 } 6787 6788 ChangeStatus updateImpl(Attributor &A) override { 6789 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6790 "updateImpl will not be called"); 6791 } 6792 6793 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6794 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6795 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6796 if (!Obj) { 6797 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6798 return nullptr; 6799 } 6800 6801 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6802 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6803 if (CI->isOne()) 6804 return Obj->getType()->getPointerElementType(); 6805 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6806 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6807 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6808 if (PrivArgAA.isAssumedPrivatizablePtr()) 6809 return Obj->getType()->getPointerElementType(); 6810 } 6811 6812 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6813 "alloca nor privatizable argument: " 6814 << *Obj << "!\n"); 6815 return nullptr; 6816 } 6817 6818 /// See AbstractAttribute::trackStatistics() 6819 void trackStatistics() const override { 6820 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6821 } 6822 }; 6823 6824 struct AAPrivatizablePtrCallSiteArgument final 6825 : public AAPrivatizablePtrFloating { 6826 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6827 : AAPrivatizablePtrFloating(IRP, A) {} 6828 6829 /// See AbstractAttribute::initialize(...). 6830 void initialize(Attributor &A) override { 6831 if (getIRPosition().hasAttr(Attribute::ByVal)) 6832 indicateOptimisticFixpoint(); 6833 } 6834 6835 /// See AbstractAttribute::updateImpl(...). 
6836 ChangeStatus updateImpl(Attributor &A) override { 6837 PrivatizableType = identifyPrivatizableType(A); 6838 if (!PrivatizableType.hasValue()) 6839 return ChangeStatus::UNCHANGED; 6840 if (!PrivatizableType.getValue()) 6841 return indicatePessimisticFixpoint(); 6842 6843 const IRPosition &IRP = getIRPosition(); 6844 auto &NoCaptureAA = 6845 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 6846 if (!NoCaptureAA.isAssumedNoCapture()) { 6847 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 6848 return indicatePessimisticFixpoint(); 6849 } 6850 6851 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 6852 if (!NoAliasAA.isAssumedNoAlias()) { 6853 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 6854 return indicatePessimisticFixpoint(); 6855 } 6856 6857 const auto &MemBehaviorAA = 6858 A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED); 6859 if (!MemBehaviorAA.isAssumedReadOnly()) { 6860 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 6861 return indicatePessimisticFixpoint(); 6862 } 6863 6864 return ChangeStatus::UNCHANGED; 6865 } 6866 6867 /// See AbstractAttribute::trackStatistics() 6868 void trackStatistics() const override { 6869 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 6870 } 6871 }; 6872 6873 struct AAPrivatizablePtrCallSiteReturned final 6874 : public AAPrivatizablePtrFloating { 6875 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 6876 : AAPrivatizablePtrFloating(IRP, A) {} 6877 6878 /// See AbstractAttribute::initialize(...). 6879 void initialize(Attributor &A) override { 6880 // TODO: We can privatize more than arguments. 6881 indicatePessimisticFixpoint(); 6882 } 6883 6884 /// See AbstractAttribute::trackStatistics() 6885 void trackStatistics() const override { 6886 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 6887 } 6888 }; 6889 6890 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 6891 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 6892 : AAPrivatizablePtrFloating(IRP, A) {} 6893 6894 /// See AbstractAttribute::initialize(...). 6895 void initialize(Attributor &A) override { 6896 // TODO: We can privatize more than arguments. 6897 indicatePessimisticFixpoint(); 6898 } 6899 6900 /// See AbstractAttribute::trackStatistics() 6901 void trackStatistics() const override { 6902 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 6903 } 6904 }; 6905 6906 /// -------------------- Memory Behavior Attributes ---------------------------- 6907 /// Includes read-none, read-only, and write-only. 6908 /// ---------------------------------------------------------------------------- 6909 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 6910 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 6911 : AAMemoryBehavior(IRP, A) {} 6912 6913 /// See AbstractAttribute::initialize(...). 6914 void initialize(Attributor &A) override { 6915 intersectAssumedBits(BEST_STATE); 6916 getKnownStateFromValue(getIRPosition(), getState()); 6917 AAMemoryBehavior::initialize(A); 6918 } 6919 6920 /// Return the memory behavior information encoded in the IR for \p IRP. 
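/// For example, existing IR attributes translate to known bits as follows:
///   readnone  -> NO_ACCESSES
///   readonly  -> NO_WRITES
///   writeonly -> NO_READS
/// In addition, an anchored instruction that cannot read (resp. write)
/// memory contributes known NO_READS (resp. NO_WRITES).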
6921 static void getKnownStateFromValue(const IRPosition &IRP, 6922 BitIntegerState &State, 6923 bool IgnoreSubsumingPositions = false) { 6924 SmallVector<Attribute, 2> Attrs; 6925 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6926 for (const Attribute &Attr : Attrs) { 6927 switch (Attr.getKindAsEnum()) { 6928 case Attribute::ReadNone: 6929 State.addKnownBits(NO_ACCESSES); 6930 break; 6931 case Attribute::ReadOnly: 6932 State.addKnownBits(NO_WRITES); 6933 break; 6934 case Attribute::WriteOnly: 6935 State.addKnownBits(NO_READS); 6936 break; 6937 default: 6938 llvm_unreachable("Unexpected attribute!"); 6939 } 6940 } 6941 6942 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 6943 if (!I->mayReadFromMemory()) 6944 State.addKnownBits(NO_READS); 6945 if (!I->mayWriteToMemory()) 6946 State.addKnownBits(NO_WRITES); 6947 } 6948 } 6949 6950 /// See AbstractAttribute::getDeducedAttributes(...). 6951 void getDeducedAttributes(LLVMContext &Ctx, 6952 SmallVectorImpl<Attribute> &Attrs) const override { 6953 assert(Attrs.size() == 0); 6954 if (isAssumedReadNone()) 6955 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 6956 else if (isAssumedReadOnly()) 6957 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 6958 else if (isAssumedWriteOnly()) 6959 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 6960 assert(Attrs.size() <= 1); 6961 } 6962 6963 /// See AbstractAttribute::manifest(...). 6964 ChangeStatus manifest(Attributor &A) override { 6965 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 6966 return ChangeStatus::UNCHANGED; 6967 6968 const IRPosition &IRP = getIRPosition(); 6969 6970 // Check if we would improve the existing attributes first. 6971 SmallVector<Attribute, 4> DeducedAttrs; 6972 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 6973 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 6974 return IRP.hasAttr(Attr.getKindAsEnum(), 6975 /* IgnoreSubsumingPositions */ true); 6976 })) 6977 return ChangeStatus::UNCHANGED; 6978 6979 // Clear existing attributes. 6980 IRP.removeAttrs(AttrKinds); 6981 6982 // Use the generic manifest method. 6983 return IRAttribute::manifest(A); 6984 } 6985 6986 /// See AbstractState::getAsStr(). 6987 const std::string getAsStr() const override { 6988 if (isAssumedReadNone()) 6989 return "readnone"; 6990 if (isAssumedReadOnly()) 6991 return "readonly"; 6992 if (isAssumedWriteOnly()) 6993 return "writeonly"; 6994 return "may-read/write"; 6995 } 6996 6997 /// The set of IR attributes AAMemoryBehavior deals with. 6998 static const Attribute::AttrKind AttrKinds[3]; 6999 }; 7000 7001 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7002 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7003 7004 /// Memory behavior attribute for a floating value. 7005 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7006 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7007 : AAMemoryBehaviorImpl(IRP, A) {} 7008 7009 /// See AbstractAttribute::updateImpl(...). 
7010 ChangeStatus updateImpl(Attributor &A) override;
7011
7012 /// See AbstractAttribute::trackStatistics()
7013 void trackStatistics() const override {
7014 if (isAssumedReadNone())
7015 STATS_DECLTRACK_FLOATING_ATTR(readnone)
7016 else if (isAssumedReadOnly())
7017 STATS_DECLTRACK_FLOATING_ATTR(readonly)
7018 else if (isAssumedWriteOnly())
7019 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7020 }
7021
7022 private:
7023 /// Return true if users of \p UserI might access the underlying
7024 /// variable/location described by \p U and should therefore be analyzed.
7025 bool followUsersOfUseIn(Attributor &A, const Use &U,
7026 const Instruction *UserI);
7027
7028 /// Update the state according to the effect of use \p U in \p UserI.
7029 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7030 };
7031
7032 /// Memory behavior attribute for function argument.
7033 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7034 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7035 : AAMemoryBehaviorFloating(IRP, A) {}
7036
7037 /// See AbstractAttribute::initialize(...).
7038 void initialize(Attributor &A) override {
7039 intersectAssumedBits(BEST_STATE);
7040 const IRPosition &IRP = getIRPosition();
7041 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7042 // can query it when we use has/getAttr. That would allow us to reuse the
7043 // initialize of the base class here.
7044 bool HasByVal =
7045 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7046 getKnownStateFromValue(IRP, getState(),
7047 /* IgnoreSubsumingPositions */ HasByVal);
7048
7049 // Initialize the use vector with all direct uses of the associated value.
7050 Argument *Arg = getAssociatedArgument();
7051 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7052 indicatePessimisticFixpoint();
7053 }
7054
7055 ChangeStatus manifest(Attributor &A) override {
7056 // TODO: Pointer arguments are not supported on vectors of pointers yet.
7057 if (!getAssociatedValue().getType()->isPointerTy())
7058 return ChangeStatus::UNCHANGED;
7059
7060 // TODO: From readattrs.ll: "inalloca parameters are always
7061 // considered written"
7062 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7063 removeKnownBits(NO_WRITES);
7064 removeAssumedBits(NO_WRITES);
7065 }
7066 return AAMemoryBehaviorFloating::manifest(A);
7067 }
7068
7069 /// See AbstractAttribute::trackStatistics()
7070 void trackStatistics() const override {
7071 if (isAssumedReadNone())
7072 STATS_DECLTRACK_ARG_ATTR(readnone)
7073 else if (isAssumedReadOnly())
7074 STATS_DECLTRACK_ARG_ATTR(readonly)
7075 else if (isAssumedWriteOnly())
7076 STATS_DECLTRACK_ARG_ATTR(writeonly)
7077 }
7078 };
7079
7080 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7081 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7082 : AAMemoryBehaviorArgument(IRP, A) {}
7083
7084 /// See AbstractAttribute::initialize(...).
7085 void initialize(Attributor &A) override {
7086 // If we don't have an associated argument this is either a variadic call
7087 // or an indirect call; either way, nothing to do here.
7088 Argument *Arg = getAssociatedArgument();
7089 if (!Arg) {
7090 indicatePessimisticFixpoint();
7091 return;
7092 }
7093 if (Arg->hasByValAttr()) {
7094 addKnownBits(NO_WRITES);
7095 removeKnownBits(NO_READS);
7096 removeAssumedBits(NO_READS);
7097 }
7098 AAMemoryBehaviorArgument::initialize(A);
7099 if (getAssociatedFunction()->isDeclaration())
7100 indicatePessimisticFixpoint();
7101 }
7102
7103 /// See AbstractAttribute::updateImpl(...).
7104 ChangeStatus updateImpl(Attributor &A) override {
7105 // TODO: Once we have call site specific value information we can provide
7106 // call site specific liveness information and then it makes
7107 // sense to specialize attributes for call site arguments instead of
7108 // redirecting requests to the callee argument.
7109 Argument *Arg = getAssociatedArgument();
7110 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7111 auto &ArgAA =
7112 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7113 return clampStateAndIndicateChange(getState(), ArgAA.getState());
7114 }
7115
7116 /// See AbstractAttribute::trackStatistics()
7117 void trackStatistics() const override {
7118 if (isAssumedReadNone())
7119 STATS_DECLTRACK_CSARG_ATTR(readnone)
7120 else if (isAssumedReadOnly())
7121 STATS_DECLTRACK_CSARG_ATTR(readonly)
7122 else if (isAssumedWriteOnly())
7123 STATS_DECLTRACK_CSARG_ATTR(writeonly)
7124 }
7125 };
7126
7127 /// Memory behavior attribute for a call site return position.
7128 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7129 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7130 : AAMemoryBehaviorFloating(IRP, A) {}
7131
7132 /// See AbstractAttribute::initialize(...).
7133 void initialize(Attributor &A) override {
7134 AAMemoryBehaviorImpl::initialize(A);
7135 Function *F = getAssociatedFunction();
7136 if (!F || F->isDeclaration())
7137 indicatePessimisticFixpoint();
7138 }
7139
7140 /// See AbstractAttribute::manifest(...).
7141 ChangeStatus manifest(Attributor &A) override {
7142 // We do not annotate returned values.
7143 return ChangeStatus::UNCHANGED;
7144 }
7145
7146 /// See AbstractAttribute::trackStatistics()
7147 void trackStatistics() const override {}
7148 };
7149
7150 /// An AA to represent the memory behavior function attributes.
7151 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7152 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7153 : AAMemoryBehaviorImpl(IRP, A) {}
7154
7155 /// See AbstractAttribute::updateImpl(Attributor &A).
7156 virtual ChangeStatus updateImpl(Attributor &A) override;
7157
7158 /// See AbstractAttribute::manifest(...).
7159 ChangeStatus manifest(Attributor &A) override {
7160 Function &F = cast<Function>(getAnchorValue());
7161 if (isAssumedReadNone()) {
7162 F.removeFnAttr(Attribute::ArgMemOnly);
7163 F.removeFnAttr(Attribute::InaccessibleMemOnly);
7164 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7165 }
7166 return AAMemoryBehaviorImpl::manifest(A);
7167 }
7168
7169 /// See AbstractAttribute::trackStatistics()
7170 void trackStatistics() const override {
7171 if (isAssumedReadNone())
7172 STATS_DECLTRACK_FN_ATTR(readnone)
7173 else if (isAssumedReadOnly())
7174 STATS_DECLTRACK_FN_ATTR(readonly)
7175 else if (isAssumedWriteOnly())
7176 STATS_DECLTRACK_FN_ATTR(writeonly)
7177 }
7178 };
7179
7180 /// AAMemoryBehavior attribute for call sites.
7181 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7182 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7183 : AAMemoryBehaviorImpl(IRP, A) {}
7184
7185 /// See AbstractAttribute::initialize(...).
7186 void initialize(Attributor &A) override {
7187 AAMemoryBehaviorImpl::initialize(A);
7188 Function *F = getAssociatedFunction();
7189 if (!F || F->isDeclaration())
7190 indicatePessimisticFixpoint();
7191 }
7192
7193 /// See AbstractAttribute::updateImpl(...).
7194 ChangeStatus updateImpl(Attributor &A) override {
7195 // TODO: Once we have call site specific value information we can provide
7196 // call site specific liveness information and then it makes
7197 // sense to specialize attributes for call site arguments instead of
7198 // redirecting requests to the callee argument.
7199 Function *F = getAssociatedFunction();
7200 const IRPosition &FnPos = IRPosition::function(*F);
7201 auto &FnAA =
7202 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7203 return clampStateAndIndicateChange(getState(), FnAA.getState());
7204 }
7205
7206 /// See AbstractAttribute::trackStatistics()
7207 void trackStatistics() const override {
7208 if (isAssumedReadNone())
7209 STATS_DECLTRACK_CS_ATTR(readnone)
7210 else if (isAssumedReadOnly())
7211 STATS_DECLTRACK_CS_ATTR(readonly)
7212 else if (isAssumedWriteOnly())
7213 STATS_DECLTRACK_CS_ATTR(writeonly)
7214 }
7215 };
7216
7217 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7218
7219 // The current assumed state used to determine a change.
7220 auto AssumedState = getAssumed();
7221
7222 auto CheckRWInst = [&](Instruction &I) {
7223 // If the instruction has its own memory behavior state, use it to restrict
7224 // the local state. No further analysis is required as the other memory
7225 // state is as optimistic as it gets.
7226 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7227 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7228 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7229 intersectAssumedBits(MemBehaviorAA.getAssumed());
7230 return !isAtFixpoint();
7231 }
7232
7233 // Remove access kind modifiers if necessary.
7234 if (I.mayReadFromMemory())
7235 removeAssumedBits(NO_READS);
7236 if (I.mayWriteToMemory())
7237 removeAssumedBits(NO_WRITES);
7238 return !isAtFixpoint();
7239 };
7240
7241 bool UsedAssumedInformation = false;
7242 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7243 UsedAssumedInformation))
7244 return indicatePessimisticFixpoint();
7245
7246 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7247 : ChangeStatus::UNCHANGED;
7248 }
7249
7250 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7251
7252 const IRPosition &IRP = getIRPosition();
7253 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7254 AAMemoryBehavior::StateType &S = getState();
7255
7256 // First, check the function scope. We take the known information and we avoid
7257 // work if the assumed information implies the current assumed information for
7258 // this attribute. This is valid for all but byval arguments.
7259 Argument *Arg = IRP.getAssociatedArgument();
7260 AAMemoryBehavior::base_t FnMemAssumedState =
7261 AAMemoryBehavior::StateType::getWorstState();
7262 if (!Arg || !Arg->hasByValAttr()) {
7263 const auto &FnMemAA =
7264 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7265 FnMemAssumedState = FnMemAA.getAssumed();
7266 S.addKnownBits(FnMemAA.getKnown());
7267 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7268 return ChangeStatus::UNCHANGED;
7269 }
7270
7271 // The current assumed state used to determine a change.
7272 auto AssumedState = S.getAssumed();
7273
7274 // Make sure the value is not captured (except through "return"); if it is,
7275 // any information derived would be irrelevant anyway as we cannot
7276 // check the potential aliases introduced by the capture. However, no need
7277 // to fall back to anything less optimistic than the function state.
7278 const auto &ArgNoCaptureAA =
7279 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7280 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7281 S.intersectAssumedBits(FnMemAssumedState);
7282 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7283 : ChangeStatus::UNCHANGED;
7284 }
7285
7286 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7287 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7288 Instruction *UserI = cast<Instruction>(U.getUser());
7289 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7290 << " \n");
7291
7292 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7293 if (UserI->isDroppable())
7294 return true;
7295
7296 // Check if the users of UserI should also be visited.
7297 Follow = followUsersOfUseIn(A, U, UserI);
7298
7299 // If UserI might touch memory we analyze the use in detail.
7300 if (UserI->mayReadOrWriteMemory())
7301 analyzeUseIn(A, U, UserI);
7302
7303 return !isAtFixpoint();
7304 };
7305
7306 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7307 return indicatePessimisticFixpoint();
7308
7309 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7310 : ChangeStatus::UNCHANGED;
7311 }
7312
7313 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7314 const Instruction *UserI) {
7315 // The loaded value is unrelated to the pointer argument, no need to
7316 // follow the users of the load.
7317 if (isa<LoadInst>(UserI))
7318 return false;
7319
7320 // By default we follow all uses assuming UserI might leak information on U,
7321 // we have special handling for call site operands though.
7322 const auto *CB = dyn_cast<CallBase>(UserI);
7323 if (!CB || !CB->isArgOperand(&U))
7324 return true;
7325
7326 // If the use is a call argument known not to be captured, the users of
7327 // the call do not need to be visited because they have to be unrelated to
7328 // the input. Note that this check is not trivial even though we disallow
7329 // general capturing of the underlying argument. The reason is that the
7330 // call might capture the argument "through return", which we allow and for
7331 // which we need to check call users.
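// A hypothetical example of such a capture "through return":
//   char *identity(char *p) { return p; }
// At a call site `%q = call i8* @identity(i8* %p)` the users of %q alias
// the argument and therefore still have to be visited.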
7332 if (U.get()->getType()->isPointerTy()) {
7333 unsigned ArgNo = CB->getArgOperandNo(&U);
7334 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7335 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7336 return !ArgNoCaptureAA.isAssumedNoCapture();
7337 }
7338
7339 return true;
7340 }
7341
7342 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7343 const Instruction *UserI) {
7344 assert(UserI->mayReadOrWriteMemory());
7345
7346 switch (UserI->getOpcode()) {
7347 default:
7348 // TODO: Handle all atomics and other side-effect operations we know of.
7349 break;
7350 case Instruction::Load:
7351 // Loads cause the NO_READS property to disappear.
7352 removeAssumedBits(NO_READS);
7353 return;
7354
7355 case Instruction::Store:
7356 // Stores cause the NO_WRITES property to disappear if the use is the
7357 // pointer operand. While capturing was taken care of elsewhere, we still
7358 // need to handle stores of the value itself as those are not looked through.
7359 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7360 removeAssumedBits(NO_WRITES);
7361 else
7362 indicatePessimisticFixpoint();
7363 return;
7364
7365 case Instruction::Call:
7366 case Instruction::CallBr:
7367 case Instruction::Invoke: {
7368 // For call sites we look at the argument memory behavior attribute (this
7369 // could be recursive!) in order to restrict our own state.
7370 const auto *CB = cast<CallBase>(UserI);
7371
7372 // Give up on operand bundles.
7373 if (CB->isBundleOperand(&U)) {
7374 indicatePessimisticFixpoint();
7375 return;
7376 }
7377
7378 // Calling a function does read the function pointer, and may write it if
7379 // the function is self-modifying.
7380 if (CB->isCallee(&U)) {
7381 removeAssumedBits(NO_READS);
7382 break;
7383 }
7384
7385 // Adjust the possible access behavior based on the information on the
7386 // argument.
7387 IRPosition Pos;
7388 if (U.get()->getType()->isPointerTy())
7389 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7390 else
7391 Pos = IRPosition::callsite_function(*CB);
7392 const auto &MemBehaviorAA =
7393 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7394 // "assumed" has at most the same bits as the MemBehaviorAA assumed
7395 // and at least "known".
7396 intersectAssumedBits(MemBehaviorAA.getAssumed());
7397 return;
7398 }
7399 }
7400
7401 // Generally, look at the "may-properties" and adjust the assumed state if we
7402 // did not trigger special handling before.
7403 if (UserI->mayReadFromMemory()) 7404 removeAssumedBits(NO_READS); 7405 if (UserI->mayWriteToMemory()) 7406 removeAssumedBits(NO_WRITES); 7407 } 7408 } // namespace 7409 7410 /// -------------------- Memory Locations Attributes --------------------------- 7411 /// Includes read-none, argmemonly, inaccessiblememonly, 7412 /// inaccessiblememorargmemonly 7413 /// ---------------------------------------------------------------------------- 7414 7415 std::string AAMemoryLocation::getMemoryLocationsAsStr( 7416 AAMemoryLocation::MemoryLocationsKind MLK) { 7417 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 7418 return "all memory"; 7419 if (MLK == AAMemoryLocation::NO_LOCATIONS) 7420 return "no memory"; 7421 std::string S = "memory:"; 7422 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 7423 S += "stack,"; 7424 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 7425 S += "constant,"; 7426 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 7427 S += "internal global,"; 7428 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 7429 S += "external global,"; 7430 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 7431 S += "argument,"; 7432 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 7433 S += "inaccessible,"; 7434 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 7435 S += "malloced,"; 7436 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 7437 S += "unknown,"; 7438 S.pop_back(); 7439 return S; 7440 } 7441 7442 namespace { 7443 struct AAMemoryLocationImpl : public AAMemoryLocation { 7444 7445 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 7446 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 7447 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7448 AccessKind2Accesses[u] = nullptr; 7449 } 7450 7451 ~AAMemoryLocationImpl() { 7452 // The AccessSets are allocated via a BumpPtrAllocator, we call 7453 // the destructor manually. 7454 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7455 if (AccessKind2Accesses[u]) 7456 AccessKind2Accesses[u]->~AccessSet(); 7457 } 7458 7459 /// See AbstractAttribute::initialize(...). 7460 void initialize(Attributor &A) override { 7461 intersectAssumedBits(BEST_STATE); 7462 getKnownStateFromValue(A, getIRPosition(), getState()); 7463 AAMemoryLocation::initialize(A); 7464 } 7465 7466 /// Return the memory behavior information encoded in the IR for \p IRP. 7467 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 7468 BitIntegerState &State, 7469 bool IgnoreSubsumingPositions = false) { 7470 // For internal functions we ignore `argmemonly` and 7471 // `inaccessiblememorargmemonly` as we might break it via interprocedural 7472 // constant propagation. It is unclear if this is the best way but it is 7473 // unlikely this will cause real performance problems. If we are deriving 7474 // attributes for the anchor function we even remove the attribute in 7475 // addition to ignoring it. 
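// A hypothetical example of the problem: if interprocedural constant
// propagation replaces a pointer argument of an internal function with a
// global, accesses that `argmemonly` attributed to argument memory now
// touch global memory, so keeping the attribute would be wrong.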
7476 bool UseArgMemOnly = true; 7477 Function *AnchorFn = IRP.getAnchorScope(); 7478 if (AnchorFn && A.isRunOn(*AnchorFn)) 7479 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7480 7481 SmallVector<Attribute, 2> Attrs; 7482 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7483 for (const Attribute &Attr : Attrs) { 7484 switch (Attr.getKindAsEnum()) { 7485 case Attribute::ReadNone: 7486 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7487 break; 7488 case Attribute::InaccessibleMemOnly: 7489 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7490 break; 7491 case Attribute::ArgMemOnly: 7492 if (UseArgMemOnly) 7493 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7494 else 7495 IRP.removeAttrs({Attribute::ArgMemOnly}); 7496 break; 7497 case Attribute::InaccessibleMemOrArgMemOnly: 7498 if (UseArgMemOnly) 7499 State.addKnownBits(inverseLocation( 7500 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7501 else 7502 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7503 break; 7504 default: 7505 llvm_unreachable("Unexpected attribute!"); 7506 } 7507 } 7508 } 7509 7510 /// See AbstractAttribute::getDeducedAttributes(...). 7511 void getDeducedAttributes(LLVMContext &Ctx, 7512 SmallVectorImpl<Attribute> &Attrs) const override { 7513 assert(Attrs.size() == 0); 7514 if (isAssumedReadNone()) { 7515 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7516 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7517 if (isAssumedInaccessibleMemOnly()) 7518 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7519 else if (isAssumedArgMemOnly()) 7520 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7521 else if (isAssumedInaccessibleOrArgMemOnly()) 7522 Attrs.push_back( 7523 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7524 } 7525 assert(Attrs.size() <= 1); 7526 } 7527 7528 /// See AbstractAttribute::manifest(...). 7529 ChangeStatus manifest(Attributor &A) override { 7530 const IRPosition &IRP = getIRPosition(); 7531 7532 // Check if we would improve the existing attributes first. 7533 SmallVector<Attribute, 4> DeducedAttrs; 7534 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7535 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7536 return IRP.hasAttr(Attr.getKindAsEnum(), 7537 /* IgnoreSubsumingPositions */ true); 7538 })) 7539 return ChangeStatus::UNCHANGED; 7540 7541 // Clear existing attributes. 7542 IRP.removeAttrs(AttrKinds); 7543 if (isAssumedReadNone()) 7544 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7545 7546 // Use the generic manifest method. 7547 return IRAttribute::manifest(A); 7548 } 7549 7550 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
7551 bool checkForAllAccessesToMemoryKind( 7552 function_ref<bool(const Instruction *, const Value *, AccessKind, 7553 MemoryLocationsKind)> 7554 Pred, 7555 MemoryLocationsKind RequestedMLK) const override { 7556 if (!isValidState()) 7557 return false; 7558 7559 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 7560 if (AssumedMLK == NO_LOCATIONS) 7561 return true; 7562 7563 unsigned Idx = 0; 7564 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 7565 CurMLK *= 2, ++Idx) { 7566 if (CurMLK & RequestedMLK) 7567 continue; 7568 7569 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 7570 for (const AccessInfo &AI : *Accesses) 7571 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 7572 return false; 7573 } 7574 7575 return true; 7576 } 7577 7578 ChangeStatus indicatePessimisticFixpoint() override { 7579 // If we give up and indicate a pessimistic fixpoint this instruction will 7580 // become an access for all potential access kinds: 7581 // TODO: Add pointers for argmemonly and globals to improve the results of 7582 // checkForAllAccessesToMemoryKind. 7583 bool Changed = false; 7584 MemoryLocationsKind KnownMLK = getKnown(); 7585 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 7586 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 7587 if (!(CurMLK & KnownMLK)) 7588 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 7589 getAccessKindFromInst(I)); 7590 return AAMemoryLocation::indicatePessimisticFixpoint(); 7591 } 7592 7593 protected: 7594 /// Helper struct to tie together an instruction that has a read or write 7595 /// effect with the pointer it accesses (if any). 7596 struct AccessInfo { 7597 7598 /// The instruction that caused the access. 7599 const Instruction *I; 7600 7601 /// The base pointer that is accessed, or null if unknown. 7602 const Value *Ptr; 7603 7604 /// The kind of access (read/write/read+write). 7605 AccessKind Kind; 7606 7607 bool operator==(const AccessInfo &RHS) const { 7608 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 7609 } 7610 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 7611 if (LHS.I != RHS.I) 7612 return LHS.I < RHS.I; 7613 if (LHS.Ptr != RHS.Ptr) 7614 return LHS.Ptr < RHS.Ptr; 7615 if (LHS.Kind != RHS.Kind) 7616 return LHS.Kind < RHS.Kind; 7617 return false; 7618 } 7619 }; 7620 7621 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 7622 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 7623 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 7624 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 7625 7626 /// Categorize the pointer arguments of CB that might access memory in 7627 /// AccessedLoc and update the state and access map accordingly. 7628 void 7629 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB, 7630 AAMemoryLocation::StateType &AccessedLocs, 7631 bool &Changed); 7632 7633 /// Return the kind(s) of location that may be accessed by \p V. 7634 AAMemoryLocation::MemoryLocationsKind 7635 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 7636 7637 /// Return the access kind as determined by \p I. 7638 AccessKind getAccessKindFromInst(const Instruction *I) { 7639 AccessKind AK = READ_WRITE; 7640 if (I) { 7641 AK = I->mayReadFromMemory() ? READ : NONE; 7642 AK = AccessKind(AK | (I->mayWriteToMemory() ? 
WRITE : NONE));
7643 }
7644 return AK;
7645 }
7646
7647 /// Update the state \p State and the AccessKind2Accesses given that \p I is
7648 /// an access of kind \p AK to a \p MLK memory location with the access
7649 /// pointer \p Ptr.
7650 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7651 MemoryLocationsKind MLK, const Instruction *I,
7652 const Value *Ptr, bool &Changed,
7653 AccessKind AK = READ_WRITE) {
7654
7655 assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7656 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7657 if (!Accesses)
7658 Accesses = new (Allocator) AccessSet();
7659 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7660 State.removeAssumedBits(MLK);
7661 }
7662
7663 /// Determine the underlying location kinds for \p Ptr, e.g., globals or
7664 /// arguments, and update the state and access map accordingly.
7665 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7666 AAMemoryLocation::StateType &State, bool &Changed);
7667
7668 /// Used to allocate access sets.
7669 BumpPtrAllocator &Allocator;
7670
7671 /// The set of IR attributes AAMemoryLocation deals with.
7672 static const Attribute::AttrKind AttrKinds[4];
7673 };
7674
7675 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7676 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7677 Attribute::InaccessibleMemOrArgMemOnly};
7678
7679 void AAMemoryLocationImpl::categorizePtrValue(
7680 Attributor &A, const Instruction &I, const Value &Ptr,
7681 AAMemoryLocation::StateType &State, bool &Changed) {
7682 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7683 << Ptr << " ["
7684 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7685
7686 SmallVector<Value *, 8> Objects;
7687 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7688 LLVM_DEBUG(
7689 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7690 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7691 getAccessKindFromInst(&I));
7692 return;
7693 }
7694
7695 for (Value *Obj : Objects) {
7696 // TODO: recognize the TBAA used for constant accesses.
7697 MemoryLocationsKind MLK = NO_LOCATIONS;
7698 assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7699 if (isa<UndefValue>(Obj))
7700 continue;
7701 if (isa<Argument>(Obj)) {
7702 // TODO: For now we do not treat byval arguments as local copies performed
7703 // on the call edge, though, we should. To make that happen we need to
7704 // teach various passes, e.g., DSE, about the copy effect of a byval. That
7705 // would also allow us to mark functions only accessing byval arguments as
7706 // readnone again, arguably their accesses have no effect outside of the
7707 // function, like accesses to allocas.
7708 MLK = NO_ARGUMENT_MEM;
7709 } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7710 // Reading constant memory is not treated as a read "effect" by the
7711 // function attr pass so we won't either. Constants defined by TBAA are
7712 // similar. (We know we do not write it because it is constant.)
      if (auto *GVar = dyn_cast<GlobalVariable>(GV))
        if (GVar->isConstant())
          continue;

      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(Obj) &&
               !NullPointerIsDefined(getAssociatedFunction(),
                                     Ptr.getType()->getPointerAddressSpace())) {
      continue;
    } else if (isa<AllocaInst>(Obj)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(
          *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
      if (NoAliasAA.isAssumedNoAlias())
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
                      << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
                      << "\n");
    updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
                              getAccessKindFromInst(&I));
  }

  LLVM_DEBUG(
      dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
             << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
}

void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
    Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
    bool &Changed) {
  for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {

    // Skip non-pointer arguments.
    const Value *ArgOp = CB.getArgOperand(ArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      continue;

    // Skip readnone arguments.
    const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
    const auto &ArgOpMemLocationAA =
        A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);

    if (ArgOpMemLocationAA.isAssumedReadNone())
      continue;

    // Categorize potentially accessed pointer arguments as if there were an
    // access instruction with them as the pointer.
    categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
  }
}

AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if we assume any accessed memory is visible.
    const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
        *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");

    if (CBMemLocationAA.isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA.getAssumedNotAccessedLocation();

    // Set the argmemonly and global bits as we handle them separately below.
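    // A minimal worked example of the mask logic below (assuming the usual
    // one-bit-per-location encoding, with NO_GLOBAL_MEM covering both the
    // internal and external global bits): if the callee may access *only*
    // global memory, then CBAssumedNotAccessedLocs has every bit but the
    // global ones set, CBAssumedNotAccessedLocsNoArgMem has all bits set,
    // the single-bit transfer loop does nothing, and global (and argument)
    // memory get the dedicated handling that follows.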
    uint32_t CBAssumedNotAccessedLocsNoArgMem =
        CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;

    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
      if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
        continue;
      updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
                                getAccessKindFromInst(&I));
    }

    // Now handle global memory if it might be accessed. This is slightly
    // tricky as NO_GLOBAL_MEM has multiple bits set.
    bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
    if (HasGlobalAccesses) {
      auto AccessPred = [&](const Instruction *, const Value *Ptr,
                            AccessKind Kind, MemoryLocationsKind MLK) {
        updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
                                  getAccessKindFromInst(&I));
        return true;
      };
      if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
              AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
        return AccessedLocs.getWorstState();
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    // Now handle argument memory if it might be accessed.
    bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
    if (HasArgAccesses)
      categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    return AccessedLocs.getAssumed();
  }

  if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
               << I << " [" << *Ptr << "]\n");
    categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
    return AccessedLocs.getAssumed();
  }

  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
                    << I << "\n");
  updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
                            getAccessKindFromInst(&I));
  return AccessedLocs.getAssumed();
}

/// An AA to represent the memory location function attributes.
struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
  AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    const auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
    if (MemBehaviorAA.isAssumedReadNone()) {
      if (MemBehaviorAA.isKnownReadNone())
        return indicateOptimisticFixpoint();
      assert(isAssumedReadNone() &&
             "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    // The current assumed state used to determine a change.
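    // The pattern here is snapshot-and-compare: remember the assumed bits,
    // let CheckRWInst below remove location bits per read/write instruction,
    // and report CHANGED only if the snapshots differ at the end.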
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // thus once we don't actually exclude any memory locations in the
      // state.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
                                            UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryLocationImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA =
        A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};

/// ------------------ Value Constant Range Attribute -------------------------

struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
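  /// Initialization seeds the known range with whatever ScalarEvolution and
  /// LazyValueInfo can already prove. As a hypothetical example, if SCEV
  /// yields [0, 16) and LVI yields [4, 32) for the associated value, the
  /// known range after the two intersectKnown calls below is [4, 16).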
  void initialize(Attributor &A) override {
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromSCEV(Attributor &A,
                           const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI));
  }

  /// Return true if \p CtxI is valid for querying outside analyses.
  /// This basically makes sure we do not ask intra-procedural analyses
  /// about a context in the wrong function or a context that violates
  /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
  /// if the original context of this AA is OK or should be considered invalid.
  bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
                                               const Instruction *CtxI,
                                               bool AllowAACtxI) const {
    if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
      return false;

    // Our context might be in a different function; no intra-procedural
    // analysis (neither ScalarEvolution nor LazyValueInfo) can handle that.
    if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
      return false;

    // If the context is not dominated by the value there are paths to the
    // context that do not define the value.
    // This cannot be handled by LazyValueInfo, so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    //       we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }

  /// Helper function to set range metadata.
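  /// !range metadata encodes the half-open interval [Lower, Upper). As a
  /// purely illustrative example, annotating an i32 load with the assumed
  /// range [0, 42) would yield IR along the lines of:
  ///   %v = load i32, i32* %p, !range !0
  ///   !0 = !{i32 0, i32 42}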
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V)) {
        assert(I == getCtxI() && "Should not annotate an instruction which is "
                                 "not the context instruction");
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed;
  }
};

struct AAValueConstantRangeArgument final
    : AAArgumentFromCallSiteArguments<
          AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
          true /* BridgeCallBaseContext */> {
  using Base = AAArgumentFromCallSiteArguments<
      AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
      true /* BridgeCallBaseContext */>;
  AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_range)
  }
};

struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
  using Base =
      AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
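  /// For floating positions, initialization also seeds the state directly
  /// where possible: a ConstantInt collapses to a singleton range, undef
  /// collapses to 0, and values that the fixpoint iteration can refine
  /// (calls, binary operators, compares, casts, selects, PHIs) are left
  /// open.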
8237 void initialize(Attributor &A) override { 8238 AAValueConstantRangeImpl::initialize(A); 8239 if (isAtFixpoint()) 8240 return; 8241 8242 Value &V = getAssociatedValue(); 8243 8244 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8245 unionAssumed(ConstantRange(C->getValue())); 8246 indicateOptimisticFixpoint(); 8247 return; 8248 } 8249 8250 if (isa<UndefValue>(&V)) { 8251 // Collapse the undef state to 0. 8252 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 8253 indicateOptimisticFixpoint(); 8254 return; 8255 } 8256 8257 if (isa<CallBase>(&V)) 8258 return; 8259 8260 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 8261 return; 8262 8263 // If it is a load instruction with range metadata, use it. 8264 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 8265 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 8266 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8267 return; 8268 } 8269 8270 // We can work with PHI and select instruction as we traverse their operands 8271 // during update. 8272 if (isa<SelectInst>(V) || isa<PHINode>(V)) 8273 return; 8274 8275 // Otherwise we give up. 8276 indicatePessimisticFixpoint(); 8277 8278 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 8279 << getAssociatedValue() << "\n"); 8280 } 8281 8282 bool calculateBinaryOperator( 8283 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 8284 const Instruction *CtxI, 8285 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8286 Value *LHS = BinOp->getOperand(0); 8287 Value *RHS = BinOp->getOperand(1); 8288 8289 // Simplify the operands first. 8290 bool UsedAssumedInformation = false; 8291 const auto &SimplifiedLHS = 8292 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8293 *this, UsedAssumedInformation); 8294 if (!SimplifiedLHS.hasValue()) 8295 return true; 8296 if (!SimplifiedLHS.getValue()) 8297 return false; 8298 LHS = *SimplifiedLHS; 8299 8300 const auto &SimplifiedRHS = 8301 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8302 *this, UsedAssumedInformation); 8303 if (!SimplifiedRHS.hasValue()) 8304 return true; 8305 if (!SimplifiedRHS.getValue()) 8306 return false; 8307 RHS = *SimplifiedRHS; 8308 8309 // TODO: Allow non integers as well. 8310 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8311 return false; 8312 8313 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8314 *this, IRPosition::value(*LHS, getCallBaseContext()), 8315 DepClassTy::REQUIRED); 8316 QuerriedAAs.push_back(&LHSAA); 8317 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8318 8319 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8320 *this, IRPosition::value(*RHS, getCallBaseContext()), 8321 DepClassTy::REQUIRED); 8322 QuerriedAAs.push_back(&RHSAA); 8323 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8324 8325 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 8326 8327 T.unionAssumed(AssumedRange); 8328 8329 // TODO: Track a known state too. 8330 8331 return T.isValidState(); 8332 } 8333 8334 bool calculateCastInst( 8335 Attributor &A, CastInst *CastI, IntegerRangeState &T, 8336 const Instruction *CtxI, 8337 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8338 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 8339 // TODO: Allow non integers as well. 8340 Value *OpV = CastI->getOperand(0); 8341 8342 // Simplify the operand first. 
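    // getAssumedSimplified follows a tri-state protocol used throughout this
    // file:
    //   - no value (None)    -> simplification is still pending; keep the
    //                           current state optimistically,
    //   - a value of nullptr -> the operand cannot be simplified; give up,
    //   - a non-null value   -> continue with the simplified operand.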
8343 bool UsedAssumedInformation = false; 8344 const auto &SimplifiedOpV = 8345 A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()), 8346 *this, UsedAssumedInformation); 8347 if (!SimplifiedOpV.hasValue()) 8348 return true; 8349 if (!SimplifiedOpV.getValue()) 8350 return false; 8351 OpV = *SimplifiedOpV; 8352 8353 if (!OpV->getType()->isIntegerTy()) 8354 return false; 8355 8356 auto &OpAA = A.getAAFor<AAValueConstantRange>( 8357 *this, IRPosition::value(*OpV, getCallBaseContext()), 8358 DepClassTy::REQUIRED); 8359 QuerriedAAs.push_back(&OpAA); 8360 T.unionAssumed( 8361 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 8362 return T.isValidState(); 8363 } 8364 8365 bool 8366 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 8367 const Instruction *CtxI, 8368 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8369 Value *LHS = CmpI->getOperand(0); 8370 Value *RHS = CmpI->getOperand(1); 8371 8372 // Simplify the operands first. 8373 bool UsedAssumedInformation = false; 8374 const auto &SimplifiedLHS = 8375 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8376 *this, UsedAssumedInformation); 8377 if (!SimplifiedLHS.hasValue()) 8378 return true; 8379 if (!SimplifiedLHS.getValue()) 8380 return false; 8381 LHS = *SimplifiedLHS; 8382 8383 const auto &SimplifiedRHS = 8384 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8385 *this, UsedAssumedInformation); 8386 if (!SimplifiedRHS.hasValue()) 8387 return true; 8388 if (!SimplifiedRHS.getValue()) 8389 return false; 8390 RHS = *SimplifiedRHS; 8391 8392 // TODO: Allow non integers as well. 8393 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8394 return false; 8395 8396 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8397 *this, IRPosition::value(*LHS, getCallBaseContext()), 8398 DepClassTy::REQUIRED); 8399 QuerriedAAs.push_back(&LHSAA); 8400 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8401 *this, IRPosition::value(*RHS, getCallBaseContext()), 8402 DepClassTy::REQUIRED); 8403 QuerriedAAs.push_back(&RHSAA); 8404 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8405 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8406 8407 // If one of them is empty set, we can't decide. 8408 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 8409 return true; 8410 8411 bool MustTrue = false, MustFalse = false; 8412 8413 auto AllowedRegion = 8414 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 8415 8416 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 8417 MustFalse = true; 8418 8419 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange)) 8420 MustTrue = true; 8421 8422 assert((!MustTrue || !MustFalse) && 8423 "Either MustTrue or MustFalse should be false!"); 8424 8425 if (MustTrue) 8426 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 8427 else if (MustFalse) 8428 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 8429 else 8430 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 8431 8432 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 8433 << " " << RHSAA << "\n"); 8434 8435 // TODO: Track a known state too. 8436 return T.isValidState(); 8437 } 8438 8439 /// See AbstractAttribute::updateImpl(...). 
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // Simplify the operand first.
        bool UsedAssumedInformation = false;
        const auto &SimplifiedOpV =
            A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
                                   *this, UsedAssumedInformation);
        if (!SimplifiedOpV.hasValue())
          return true;
        if (!SimplifiedOpV.getValue())
          return false;
        Value *VPtr = *SimplifiedOpV;

        // If the value is not an instruction, we query the AA through the
        // Attributor.
        const auto &AA = A.getAAFor<AAValueConstantRange>(
            *this, IRPosition::value(*VPtr, getCallBaseContext()),
            DepClassTy::REQUIRED);

        // The clamp operator is not used, so we can utilize the program
        // point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions.

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see
      //       also AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
                                                  VisitValueCB, getCtxI(),
                                                  /* UseValueSimplify */ false))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};

struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
  AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
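  /// A function position has no single integer value whose range could be
  /// tracked; this AA exists only to anchor lookups, so an actual update
  /// would be a bug, hence the llvm_unreachable below.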
8522 ChangeStatus updateImpl(Attributor &A) override { 8523 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 8524 "not be called"); 8525 } 8526 8527 /// See AbstractAttribute::trackStatistics() 8528 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 8529 }; 8530 8531 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 8532 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 8533 : AAValueConstantRangeFunction(IRP, A) {} 8534 8535 /// See AbstractAttribute::trackStatistics() 8536 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 8537 }; 8538 8539 struct AAValueConstantRangeCallSiteReturned 8540 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8541 AAValueConstantRangeImpl, 8542 AAValueConstantRangeImpl::StateType, 8543 /* IntroduceCallBaseContext */ true> { 8544 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 8545 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8546 AAValueConstantRangeImpl, 8547 AAValueConstantRangeImpl::StateType, 8548 /* IntroduceCallBaseContext */ true>(IRP, 8549 A) { 8550 } 8551 8552 /// See AbstractAttribute::initialize(...). 8553 void initialize(Attributor &A) override { 8554 // If it is a load instruction with range metadata, use the metadata. 8555 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 8556 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 8557 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8558 8559 AAValueConstantRangeImpl::initialize(A); 8560 } 8561 8562 /// See AbstractAttribute::trackStatistics() 8563 void trackStatistics() const override { 8564 STATS_DECLTRACK_CSRET_ATTR(value_range) 8565 } 8566 }; 8567 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 8568 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 8569 : AAValueConstantRangeFloating(IRP, A) {} 8570 8571 /// See AbstractAttribute::manifest() 8572 ChangeStatus manifest(Attributor &A) override { 8573 return ChangeStatus::UNCHANGED; 8574 } 8575 8576 /// See AbstractAttribute::trackStatistics() 8577 void trackStatistics() const override { 8578 STATS_DECLTRACK_CSARG_ATTR(value_range) 8579 } 8580 }; 8581 8582 /// ------------------ Potential Values Attribute ------------------------- 8583 8584 struct AAPotentialValuesImpl : AAPotentialValues { 8585 using StateType = PotentialConstantIntValuesState; 8586 8587 AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A) 8588 : AAPotentialValues(IRP, A) {} 8589 8590 /// See AbstractAttribute::initialize(..). 8591 void initialize(Attributor &A) override { 8592 if (A.hasSimplificationCallback(getIRPosition())) 8593 indicatePessimisticFixpoint(); 8594 else 8595 AAPotentialValues::initialize(A); 8596 } 8597 8598 /// See AbstractAttribute::getAsStr(). 8599 const std::string getAsStr() const override { 8600 std::string Str; 8601 llvm::raw_string_ostream OS(Str); 8602 OS << getState(); 8603 return OS.str(); 8604 } 8605 8606 /// See AbstractAttribute::updateImpl(...). 
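  /// The base implementation is deliberately conservative: any position
  /// without a specialized update rule drops to the pessimistic fixpoint
  /// immediately instead of iterating.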
8607 ChangeStatus updateImpl(Attributor &A) override { 8608 return indicatePessimisticFixpoint(); 8609 } 8610 }; 8611 8612 struct AAPotentialValuesArgument final 8613 : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8614 PotentialConstantIntValuesState> { 8615 using Base = 8616 AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl, 8617 PotentialConstantIntValuesState>; 8618 AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A) 8619 : Base(IRP, A) {} 8620 8621 /// See AbstractAttribute::initialize(..). 8622 void initialize(Attributor &A) override { 8623 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8624 indicatePessimisticFixpoint(); 8625 } else { 8626 Base::initialize(A); 8627 } 8628 } 8629 8630 /// See AbstractAttribute::trackStatistics() 8631 void trackStatistics() const override { 8632 STATS_DECLTRACK_ARG_ATTR(potential_values) 8633 } 8634 }; 8635 8636 struct AAPotentialValuesReturned 8637 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { 8638 using Base = 8639 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; 8640 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) 8641 : Base(IRP, A) {} 8642 8643 /// See AbstractAttribute::trackStatistics() 8644 void trackStatistics() const override { 8645 STATS_DECLTRACK_FNRET_ATTR(potential_values) 8646 } 8647 }; 8648 8649 struct AAPotentialValuesFloating : AAPotentialValuesImpl { 8650 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) 8651 : AAPotentialValuesImpl(IRP, A) {} 8652 8653 /// See AbstractAttribute::initialize(..). 8654 void initialize(Attributor &A) override { 8655 AAPotentialValuesImpl::initialize(A); 8656 if (isAtFixpoint()) 8657 return; 8658 8659 Value &V = getAssociatedValue(); 8660 8661 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8662 unionAssumed(C->getValue()); 8663 indicateOptimisticFixpoint(); 8664 return; 8665 } 8666 8667 if (isa<UndefValue>(&V)) { 8668 unionAssumedWithUndef(); 8669 indicateOptimisticFixpoint(); 8670 return; 8671 } 8672 8673 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) 8674 return; 8675 8676 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V)) 8677 return; 8678 8679 indicatePessimisticFixpoint(); 8680 8681 LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: " 8682 << getAssociatedValue() << "\n"); 8683 } 8684 8685 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, 8686 const APInt &RHS) { 8687 return ICmpInst::compare(LHS, RHS, ICI->getPredicate()); 8688 } 8689 8690 static APInt calculateCastInst(const CastInst *CI, const APInt &Src, 8691 uint32_t ResultBitWidth) { 8692 Instruction::CastOps CastOp = CI->getOpcode(); 8693 switch (CastOp) { 8694 default: 8695 llvm_unreachable("unsupported or not integer cast"); 8696 case Instruction::Trunc: 8697 return Src.trunc(ResultBitWidth); 8698 case Instruction::SExt: 8699 return Src.sext(ResultBitWidth); 8700 case Instruction::ZExt: 8701 return Src.zext(ResultBitWidth); 8702 case Instruction::BitCast: 8703 return Src; 8704 } 8705 } 8706 8707 static APInt calculateBinaryOperator(const BinaryOperator *BinOp, 8708 const APInt &LHS, const APInt &RHS, 8709 bool &SkipOperation, bool &Unsupported) { 8710 Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); 8711 // Unsupported is set to true when the binary operator is not supported. 8712 // SkipOperation is set to true when UB occur with the given operand pair 8713 // (LHS, RHS). 
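  // For example, a division with RHS == 0 sets SkipOperation so that this
  // particular (LHS, RHS) pair simply contributes no result, whereas an
  // opcode we do not model at all (e.g., floating-point operations) sets
  // Unsupported and aborts the whole calculation.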
8714 // TODO: we should look at nsw and nuw keywords to handle operations 8715 // that create poison or undef value. 8716 switch (BinOpcode) { 8717 default: 8718 Unsupported = true; 8719 return LHS; 8720 case Instruction::Add: 8721 return LHS + RHS; 8722 case Instruction::Sub: 8723 return LHS - RHS; 8724 case Instruction::Mul: 8725 return LHS * RHS; 8726 case Instruction::UDiv: 8727 if (RHS.isZero()) { 8728 SkipOperation = true; 8729 return LHS; 8730 } 8731 return LHS.udiv(RHS); 8732 case Instruction::SDiv: 8733 if (RHS.isZero()) { 8734 SkipOperation = true; 8735 return LHS; 8736 } 8737 return LHS.sdiv(RHS); 8738 case Instruction::URem: 8739 if (RHS.isZero()) { 8740 SkipOperation = true; 8741 return LHS; 8742 } 8743 return LHS.urem(RHS); 8744 case Instruction::SRem: 8745 if (RHS.isZero()) { 8746 SkipOperation = true; 8747 return LHS; 8748 } 8749 return LHS.srem(RHS); 8750 case Instruction::Shl: 8751 return LHS.shl(RHS); 8752 case Instruction::LShr: 8753 return LHS.lshr(RHS); 8754 case Instruction::AShr: 8755 return LHS.ashr(RHS); 8756 case Instruction::And: 8757 return LHS & RHS; 8758 case Instruction::Or: 8759 return LHS | RHS; 8760 case Instruction::Xor: 8761 return LHS ^ RHS; 8762 } 8763 } 8764 8765 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8766 const APInt &LHS, const APInt &RHS) { 8767 bool SkipOperation = false; 8768 bool Unsupported = false; 8769 APInt Result = 8770 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8771 if (Unsupported) 8772 return false; 8773 // If SkipOperation is true, we can ignore this operand pair (L, R). 8774 if (!SkipOperation) 8775 unionAssumed(Result); 8776 return isValidState(); 8777 } 8778 8779 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8780 auto AssumedBefore = getAssumed(); 8781 Value *LHS = ICI->getOperand(0); 8782 Value *RHS = ICI->getOperand(1); 8783 8784 // Simplify the operands first. 8785 bool UsedAssumedInformation = false; 8786 const auto &SimplifiedLHS = 8787 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8788 *this, UsedAssumedInformation); 8789 if (!SimplifiedLHS.hasValue()) 8790 return ChangeStatus::UNCHANGED; 8791 if (!SimplifiedLHS.getValue()) 8792 return indicatePessimisticFixpoint(); 8793 LHS = *SimplifiedLHS; 8794 8795 const auto &SimplifiedRHS = 8796 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8797 *this, UsedAssumedInformation); 8798 if (!SimplifiedRHS.hasValue()) 8799 return ChangeStatus::UNCHANGED; 8800 if (!SimplifiedRHS.getValue()) 8801 return indicatePessimisticFixpoint(); 8802 RHS = *SimplifiedRHS; 8803 8804 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8805 return indicatePessimisticFixpoint(); 8806 8807 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8808 DepClassTy::REQUIRED); 8809 if (!LHSAA.isValidState()) 8810 return indicatePessimisticFixpoint(); 8811 8812 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8813 DepClassTy::REQUIRED); 8814 if (!RHSAA.isValidState()) 8815 return indicatePessimisticFixpoint(); 8816 8817 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 8818 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 8819 8820 // TODO: make use of undef flag to limit potential values aggressively. 
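    // Worked example (hypothetical sets): for `icmp ult %a, %b` with
    // LHS = {1, 2} and RHS = {3}, every pair compares true, so only
    // MaybeTrue is set and the i1 result set becomes {1}, i.e., the compare
    // folds to true. With LHS = {1, 4} both flags would be set and we would
    // take the pessimistic fixpoint below.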
8821 bool MaybeTrue = false, MaybeFalse = false; 8822 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8823 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 8824 // The result of any comparison between undefs can be soundly replaced 8825 // with undef. 8826 unionAssumedWithUndef(); 8827 } else if (LHSAA.undefIsContained()) { 8828 for (const APInt &R : RHSAAPVS) { 8829 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8830 MaybeTrue |= CmpResult; 8831 MaybeFalse |= !CmpResult; 8832 if (MaybeTrue & MaybeFalse) 8833 return indicatePessimisticFixpoint(); 8834 } 8835 } else if (RHSAA.undefIsContained()) { 8836 for (const APInt &L : LHSAAPVS) { 8837 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8838 MaybeTrue |= CmpResult; 8839 MaybeFalse |= !CmpResult; 8840 if (MaybeTrue & MaybeFalse) 8841 return indicatePessimisticFixpoint(); 8842 } 8843 } else { 8844 for (const APInt &L : LHSAAPVS) { 8845 for (const APInt &R : RHSAAPVS) { 8846 bool CmpResult = calculateICmpInst(ICI, L, R); 8847 MaybeTrue |= CmpResult; 8848 MaybeFalse |= !CmpResult; 8849 if (MaybeTrue & MaybeFalse) 8850 return indicatePessimisticFixpoint(); 8851 } 8852 } 8853 } 8854 if (MaybeTrue) 8855 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8856 if (MaybeFalse) 8857 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8858 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8859 : ChangeStatus::CHANGED; 8860 } 8861 8862 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8863 auto AssumedBefore = getAssumed(); 8864 Value *LHS = SI->getTrueValue(); 8865 Value *RHS = SI->getFalseValue(); 8866 8867 // Simplify the operands first. 8868 bool UsedAssumedInformation = false; 8869 const auto &SimplifiedLHS = 8870 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8871 *this, UsedAssumedInformation); 8872 if (!SimplifiedLHS.hasValue()) 8873 return ChangeStatus::UNCHANGED; 8874 if (!SimplifiedLHS.getValue()) 8875 return indicatePessimisticFixpoint(); 8876 LHS = *SimplifiedLHS; 8877 8878 const auto &SimplifiedRHS = 8879 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8880 *this, UsedAssumedInformation); 8881 if (!SimplifiedRHS.hasValue()) 8882 return ChangeStatus::UNCHANGED; 8883 if (!SimplifiedRHS.getValue()) 8884 return indicatePessimisticFixpoint(); 8885 RHS = *SimplifiedRHS; 8886 8887 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8888 return indicatePessimisticFixpoint(); 8889 8890 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 8891 UsedAssumedInformation); 8892 8893 // Check if we only need one operand. 8894 bool OnlyLeft = false, OnlyRight = false; 8895 if (C.hasValue() && *C && (*C)->isOneValue()) 8896 OnlyLeft = true; 8897 else if (C.hasValue() && *C && (*C)->isZeroValue()) 8898 OnlyRight = true; 8899 8900 const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr; 8901 if (!OnlyRight) { 8902 LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8903 DepClassTy::REQUIRED); 8904 if (!LHSAA->isValidState()) 8905 return indicatePessimisticFixpoint(); 8906 } 8907 if (!OnlyLeft) { 8908 RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 8909 DepClassTy::REQUIRED); 8910 if (!RHSAA->isValidState()) 8911 return indicatePessimisticFixpoint(); 8912 } 8913 8914 if (!LHSAA || !RHSAA) { 8915 // select (true/false), lhs, rhs 8916 auto *OpAA = LHSAA ? 
LHSAA : RHSAA; 8917 8918 if (OpAA->undefIsContained()) 8919 unionAssumedWithUndef(); 8920 else 8921 unionAssumed(*OpAA); 8922 8923 } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) { 8924 // select i1 *, undef , undef => undef 8925 unionAssumedWithUndef(); 8926 } else { 8927 unionAssumed(*LHSAA); 8928 unionAssumed(*RHSAA); 8929 } 8930 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8931 : ChangeStatus::CHANGED; 8932 } 8933 8934 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 8935 auto AssumedBefore = getAssumed(); 8936 if (!CI->isIntegerCast()) 8937 return indicatePessimisticFixpoint(); 8938 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 8939 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 8940 Value *Src = CI->getOperand(0); 8941 8942 // Simplify the operand first. 8943 bool UsedAssumedInformation = false; 8944 const auto &SimplifiedSrc = 8945 A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()), 8946 *this, UsedAssumedInformation); 8947 if (!SimplifiedSrc.hasValue()) 8948 return ChangeStatus::UNCHANGED; 8949 if (!SimplifiedSrc.getValue()) 8950 return indicatePessimisticFixpoint(); 8951 Src = *SimplifiedSrc; 8952 8953 auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src), 8954 DepClassTy::REQUIRED); 8955 if (!SrcAA.isValidState()) 8956 return indicatePessimisticFixpoint(); 8957 const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet(); 8958 if (SrcAA.undefIsContained()) 8959 unionAssumedWithUndef(); 8960 else { 8961 for (const APInt &S : SrcAAPVS) { 8962 APInt T = calculateCastInst(CI, S, ResultBitWidth); 8963 unionAssumed(T); 8964 } 8965 } 8966 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8967 : ChangeStatus::CHANGED; 8968 } 8969 8970 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 8971 auto AssumedBefore = getAssumed(); 8972 Value *LHS = BinOp->getOperand(0); 8973 Value *RHS = BinOp->getOperand(1); 8974 8975 // Simplify the operands first. 8976 bool UsedAssumedInformation = false; 8977 const auto &SimplifiedLHS = 8978 A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()), 8979 *this, UsedAssumedInformation); 8980 if (!SimplifiedLHS.hasValue()) 8981 return ChangeStatus::UNCHANGED; 8982 if (!SimplifiedLHS.getValue()) 8983 return indicatePessimisticFixpoint(); 8984 LHS = *SimplifiedLHS; 8985 8986 const auto &SimplifiedRHS = 8987 A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()), 8988 *this, UsedAssumedInformation); 8989 if (!SimplifiedRHS.hasValue()) 8990 return ChangeStatus::UNCHANGED; 8991 if (!SimplifiedRHS.getValue()) 8992 return indicatePessimisticFixpoint(); 8993 RHS = *SimplifiedRHS; 8994 8995 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8996 return indicatePessimisticFixpoint(); 8997 8998 auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS), 8999 DepClassTy::REQUIRED); 9000 if (!LHSAA.isValidState()) 9001 return indicatePessimisticFixpoint(); 9002 9003 auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS), 9004 DepClassTy::REQUIRED); 9005 if (!RHSAA.isValidState()) 9006 return indicatePessimisticFixpoint(); 9007 9008 const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet(); 9009 const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet(); 9010 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9011 9012 // TODO: make use of undef flag to limit potential values aggressively. 
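    // Worked example (hypothetical sets): for an `add` with LHS = {1, 2} and
    // RHS = {10, 20}, the nested loops below union the cross product
    // {11, 21, 12, 22} into the assumed set. Should the set ever exceed
    // MaxPotentialValues, the state turns invalid and we fall back to the
    // pessimistic fixpoint.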
9013 if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) { 9014 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9015 return indicatePessimisticFixpoint(); 9016 } else if (LHSAA.undefIsContained()) { 9017 for (const APInt &R : RHSAAPVS) { 9018 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9019 return indicatePessimisticFixpoint(); 9020 } 9021 } else if (RHSAA.undefIsContained()) { 9022 for (const APInt &L : LHSAAPVS) { 9023 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9024 return indicatePessimisticFixpoint(); 9025 } 9026 } else { 9027 for (const APInt &L : LHSAAPVS) { 9028 for (const APInt &R : RHSAAPVS) { 9029 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9030 return indicatePessimisticFixpoint(); 9031 } 9032 } 9033 } 9034 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9035 : ChangeStatus::CHANGED; 9036 } 9037 9038 ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) { 9039 auto AssumedBefore = getAssumed(); 9040 for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) { 9041 Value *IncomingValue = PHI->getIncomingValue(u); 9042 9043 // Simplify the operand first. 9044 bool UsedAssumedInformation = false; 9045 const auto &SimplifiedIncomingValue = A.getAssumedSimplified( 9046 IRPosition::value(*IncomingValue, getCallBaseContext()), *this, 9047 UsedAssumedInformation); 9048 if (!SimplifiedIncomingValue.hasValue()) 9049 continue; 9050 if (!SimplifiedIncomingValue.getValue()) 9051 return indicatePessimisticFixpoint(); 9052 IncomingValue = *SimplifiedIncomingValue; 9053 9054 auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>( 9055 *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED); 9056 if (!PotentialValuesAA.isValidState()) 9057 return indicatePessimisticFixpoint(); 9058 if (PotentialValuesAA.undefIsContained()) 9059 unionAssumedWithUndef(); 9060 else 9061 unionAssumed(PotentialValuesAA.getAssumed()); 9062 } 9063 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9064 : ChangeStatus::CHANGED; 9065 } 9066 9067 ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) { 9068 if (!L.getType()->isIntegerTy()) 9069 return indicatePessimisticFixpoint(); 9070 9071 auto Union = [&](Value &V) { 9072 if (isa<UndefValue>(V)) { 9073 unionAssumedWithUndef(); 9074 return true; 9075 } 9076 if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) { 9077 unionAssumed(CI->getValue()); 9078 return true; 9079 } 9080 return false; 9081 }; 9082 auto AssumedBefore = getAssumed(); 9083 9084 if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union)) 9085 return indicatePessimisticFixpoint(); 9086 9087 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9088 : ChangeStatus::CHANGED; 9089 } 9090 9091 /// See AbstractAttribute::updateImpl(...). 
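  /// updateImpl below is a plain dispatch over the instruction kind; each
  /// handler compares the assumed set before and after its own update and
  /// reports CHANGED only if the set actually grew.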
9092 ChangeStatus updateImpl(Attributor &A) override { 9093 Value &V = getAssociatedValue(); 9094 Instruction *I = dyn_cast<Instruction>(&V); 9095 9096 if (auto *ICI = dyn_cast<ICmpInst>(I)) 9097 return updateWithICmpInst(A, ICI); 9098 9099 if (auto *SI = dyn_cast<SelectInst>(I)) 9100 return updateWithSelectInst(A, SI); 9101 9102 if (auto *CI = dyn_cast<CastInst>(I)) 9103 return updateWithCastInst(A, CI); 9104 9105 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 9106 return updateWithBinaryOperator(A, BinOp); 9107 9108 if (auto *PHI = dyn_cast<PHINode>(I)) 9109 return updateWithPHINode(A, PHI); 9110 9111 if (auto *L = dyn_cast<LoadInst>(I)) 9112 return updateWithLoad(A, *L); 9113 9114 return indicatePessimisticFixpoint(); 9115 } 9116 9117 /// See AbstractAttribute::trackStatistics() 9118 void trackStatistics() const override { 9119 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 9120 } 9121 }; 9122 9123 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 9124 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 9125 : AAPotentialValuesImpl(IRP, A) {} 9126 9127 /// See AbstractAttribute::initialize(...). 9128 ChangeStatus updateImpl(Attributor &A) override { 9129 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 9130 "not be called"); 9131 } 9132 9133 /// See AbstractAttribute::trackStatistics() 9134 void trackStatistics() const override { 9135 STATS_DECLTRACK_FN_ATTR(potential_values) 9136 } 9137 }; 9138 9139 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 9140 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 9141 : AAPotentialValuesFunction(IRP, A) {} 9142 9143 /// See AbstractAttribute::trackStatistics() 9144 void trackStatistics() const override { 9145 STATS_DECLTRACK_CS_ATTR(potential_values) 9146 } 9147 }; 9148 9149 struct AAPotentialValuesCallSiteReturned 9150 : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> { 9151 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 9152 : AACallSiteReturnedFromReturned<AAPotentialValues, 9153 AAPotentialValuesImpl>(IRP, A) {} 9154 9155 /// See AbstractAttribute::trackStatistics() 9156 void trackStatistics() const override { 9157 STATS_DECLTRACK_CSRET_ATTR(potential_values) 9158 } 9159 }; 9160 9161 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating { 9162 AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A) 9163 : AAPotentialValuesFloating(IRP, A) {} 9164 9165 /// See AbstractAttribute::initialize(..). 9166 void initialize(Attributor &A) override { 9167 AAPotentialValuesImpl::initialize(A); 9168 if (isAtFixpoint()) 9169 return; 9170 9171 Value &V = getAssociatedValue(); 9172 9173 if (auto *C = dyn_cast<ConstantInt>(&V)) { 9174 unionAssumed(C->getValue()); 9175 indicateOptimisticFixpoint(); 9176 return; 9177 } 9178 9179 if (isa<UndefValue>(&V)) { 9180 unionAssumedWithUndef(); 9181 indicateOptimisticFixpoint(); 9182 return; 9183 } 9184 } 9185 9186 /// See AbstractAttribute::updateImpl(...). 9187 ChangeStatus updateImpl(Attributor &A) override { 9188 Value &V = getAssociatedValue(); 9189 auto AssumedBefore = getAssumed(); 9190 auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V), 9191 DepClassTy::REQUIRED); 9192 const auto &S = AA.getAssumed(); 9193 unionAssumed(S); 9194 return AssumedBefore == getAssumed() ? 
ChangeStatus::UNCHANGED 9195 : ChangeStatus::CHANGED; 9196 } 9197 9198 /// See AbstractAttribute::trackStatistics() 9199 void trackStatistics() const override { 9200 STATS_DECLTRACK_CSARG_ATTR(potential_values) 9201 } 9202 }; 9203 9204 /// ------------------------ NoUndef Attribute --------------------------------- 9205 struct AANoUndefImpl : AANoUndef { 9206 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} 9207 9208 /// See AbstractAttribute::initialize(...). 9209 void initialize(Attributor &A) override { 9210 if (getIRPosition().hasAttr({Attribute::NoUndef})) { 9211 indicateOptimisticFixpoint(); 9212 return; 9213 } 9214 Value &V = getAssociatedValue(); 9215 if (isa<UndefValue>(V)) 9216 indicatePessimisticFixpoint(); 9217 else if (isa<FreezeInst>(V)) 9218 indicateOptimisticFixpoint(); 9219 else if (getPositionKind() != IRPosition::IRP_RETURNED && 9220 isGuaranteedNotToBeUndefOrPoison(&V)) 9221 indicateOptimisticFixpoint(); 9222 else 9223 AANoUndef::initialize(A); 9224 } 9225 9226 /// See followUsesInMBEC 9227 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 9228 AANoUndef::StateType &State) { 9229 const Value *UseV = U->get(); 9230 const DominatorTree *DT = nullptr; 9231 AssumptionCache *AC = nullptr; 9232 InformationCache &InfoCache = A.getInfoCache(); 9233 if (Function *F = getAnchorScope()) { 9234 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 9235 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 9236 } 9237 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); 9238 bool TrackUse = false; 9239 // Track use for instructions which must produce undef or poison bits when 9240 // at least one operand contains such bits. 9241 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) 9242 TrackUse = true; 9243 return TrackUse; 9244 } 9245 9246 /// See AbstractAttribute::getAsStr(). 9247 const std::string getAsStr() const override { 9248 return getAssumed() ? "noundef" : "may-undef-or-poison"; 9249 } 9250 9251 ChangeStatus manifest(Attributor &A) override { 9252 // We don't manifest noundef attribute for dead positions because the 9253 // associated values with dead positions would be replaced with undef 9254 // values. 9255 bool UsedAssumedInformation = false; 9256 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, 9257 UsedAssumedInformation)) 9258 return ChangeStatus::UNCHANGED; 9259 // A position whose simplified value does not have any value is 9260 // considered to be dead. We don't manifest noundef in such positions for 9261 // the same reason above. 9262 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation) 9263 .hasValue()) 9264 return ChangeStatus::UNCHANGED; 9265 return AANoUndef::manifest(A); 9266 } 9267 }; 9268 9269 struct AANoUndefFloating : public AANoUndefImpl { 9270 AANoUndefFloating(const IRPosition &IRP, Attributor &A) 9271 : AANoUndefImpl(IRP, A) {} 9272 9273 /// See AbstractAttribute::initialize(...). 9274 void initialize(Attributor &A) override { 9275 AANoUndefImpl::initialize(A); 9276 if (!getState().isAtFixpoint()) 9277 if (Instruction *CtxI = getCtxI()) 9278 followUsesInMBEC(*this, A, getState(), *CtxI); 9279 } 9280 9281 /// See AbstractAttribute::updateImpl(...). 
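  /// The update below walks the underlying values with genericValueTraversal
  /// and combines the noundef states of everything it visits; running into
  /// ourselves without having stripped anything indicates a cycle and forces
  /// the pessimistic fixpoint.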
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
                                          VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};

struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  bool hasUnknownCallee() const override { return HasUnknownCallee; }

  bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};

struct AACallEdgesCallSite : public AACallEdgesImpl {
  AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
                          bool Stripped) -> bool {
      if (Function *Fn = dyn_cast<Function>(&V)) {
        addCalledFunction(Fn, Change);
      } else {
        LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
        setHasUnknownCallee(true, Change);
      }

      // Explore all values.
      return true;
    };

    // Process any value that we might call.
    auto ProcessCalledOperand = [&](Value *V) {
      bool DummyValue = false;
      if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
                                       DummyValue, VisitValue, nullptr,
                                       false)) {
        // If we haven't gone through all values, assume that there are unknown
        // callees.
        setHasUnknownCallee(true, Change);
      }
    };

    CallBase *CB = cast<CallBase>(getCtxI());

    if (CB->isInlineAsm()) {
      setHasUnknownCallee(false, Change);
      return Change;
    }

    // Process callee metadata if available.
    if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
      for (auto &Op : MD->operands()) {
        Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
        if (Callee)
          addCalledFunction(Callee, Change);
      }
      return Change;
    }

    // The simplest case: the called operand itself.
    ProcessCalledOperand(CB->getCalledOperand());

    // Process callback functions.
    SmallVector<const Use *, 4u> CallbackUses;
    AbstractCallSite::getCallbackUses(*CB, CallbackUses);
    for (const Use *U : CallbackUses)
      ProcessCalledOperand(U->get());

    return Change;
  }
};

struct AACallEdgesFunction : public AACallEdgesImpl {
  AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
      : AACallEdgesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    auto ProcessCallInst = [&](Instruction &Inst) {
      CallBase &CB = cast<CallBase>(Inst);

      auto &CBEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
      if (CBEdges.hasNonAsmUnknownCallee())
        setHasUnknownCallee(true, Change);
      if (CBEdges.hasUnknownCallee())
        setHasUnknownCallee(false, Change);

      for (Function *F : CBEdges.getOptimisticEdges())
        addCalledFunction(F, Change);

      return true;
    };

    // Visit all callable instructions.
    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
                                           UsedAssumedInformation)) {
      // If we haven't looked at all call like instructions, assume that there
      // are unknown callees.
      setHasUnknownCallee(true, Change);
    }

    return Change;
  }
};

struct AAFunctionReachabilityFunction : public AAFunctionReachability {
private:
  struct QuerySet {
    void markReachable(Function *Fn) {
      Reachable.insert(Fn);
      Unreachable.erase(Fn);
    }

    ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
                        ArrayRef<const AACallEdges *> AAEdgesList) {
      ChangeStatus Change = ChangeStatus::UNCHANGED;

      for (const auto *AAEdges : AAEdgesList) {
        if (AAEdges->hasUnknownCallee()) {
          if (!CanReachUnknownCallee)
            Change = ChangeStatus::CHANGED;
          CanReachUnknownCallee = true;
          return Change;
        }
      }

      for (Function *Fn : make_early_inc_range(Unreachable)) {
        if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
          Change = ChangeStatus::CHANGED;
          markReachable(Fn);
        }
      }
      return Change;
    }

    bool isReachable(Attributor &A, const AAFunctionReachability &AA,
                     ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
      // Assume that we can reach the function.
      // TODO: Be more specific with the unknown callee.
      if (CanReachUnknownCallee)
        return true;

      if (Reachable.count(Fn))
        return true;

      if (Unreachable.count(Fn))
        return false;

      // We need to assume that this function can't reach Fn to prevent
      // an infinite loop if this function is recursive.
      Unreachable.insert(Fn);

      bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
      if (Result)
        markReachable(Fn);
      return Result;
    }

    bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
                          ArrayRef<const AACallEdges *> AAEdgesList,
                          Function *Fn) const {
      // Handle the most trivial case first.
      for (const auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        if (Edges.count(Fn))
          return true;
      }

      SmallVector<const AAFunctionReachability *, 8> Deps;
      for (const auto *AAEdges : AAEdgesList) {
        const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();

        for (Function *Edge : Edges) {
          // We don't need a dependency if the result is reachable.
          const AAFunctionReachability &EdgeReachability =
              A.getAAFor<AAFunctionReachability>(
                  AA, IRPosition::function(*Edge), DepClassTy::NONE);
          Deps.push_back(&EdgeReachability);

          if (EdgeReachability.canReach(A, Fn))
            return true;
        }
      }

      // The result is false for now; record the dependencies and leave.
      for (const auto *Dep : Deps)
        A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);

      return false;
    }

    /// Set of functions that we know for sure are reachable.
    DenseSet<Function *> Reachable;

    /// Set of functions that are unreachable, but might become reachable.
    DenseSet<Function *> Unreachable;

    /// If we can reach a function with a call to an unknown function we assume
    /// that we can reach any function.
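    ///
    /// A typical source of such an unknown callee is an indirect call through
    /// an arbitrary function pointer, for which no concrete callee can be
    /// identified.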
    bool CanReachUnknownCallee = false;
  };

public:
  AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAFunctionReachability(IRP, A) {}

  bool canReach(Attributor &A, Function *Fn) const override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    bool Result =
        NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// Can \p CB reach \p Fn?
  bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
    const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
        *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);

    // The Attributor returns attributes as const, so this function has to be
    // const for users of this attribute to use it without having to do
    // a const_cast.
    // This is a hack for us to be able to cache queries.
    auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
    QuerySet &CBQuery = NonConstThis->CBQueries[&CB];

    bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);

    return Result;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const AACallEdges &AAEdges =
        A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
    ChangeStatus Change = ChangeStatus::UNCHANGED;

    Change |= WholeFunction.update(A, *this, {&AAEdges});

    // Iterate by reference; iterating by value would update a copy of the
    // cached QuerySet and lose the result.
    for (auto &CBPair : CBQueries) {
      const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
          *this, IRPosition::callsite_function(*CBPair.first),
          DepClassTy::REQUIRED);

      Change |= CBPair.second.update(A, *this, {&AAEdges});
    }

    return Change;
  }

  const std::string getAsStr() const override {
    size_t QueryCount =
        WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();

    return "FunctionReachability [" +
           std::to_string(WholeFunction.Reachable.size()) + "," +
           std::to_string(QueryCount) + "]";
  }

  void trackStatistics() const override {}

private:
  bool canReachUnknownCallee() const override {
    return WholeFunction.CanReachUnknownCallee;
  }

  /// Used to answer if the whole function can reach a specific function.
  QuerySet WholeFunction;

  /// Used to answer if a call base inside this function can reach a specific
  /// function.
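  ///
  /// A minimal usage sketch (hypothetical caller code, not taken from this
  /// file): given an Attributor \p A, a querying attribute \p QueryingAA, a
  /// call base \p CB, and a target \p Fn, one could ask
  /// \code
  ///   const auto &FnReachability = A.getAAFor<AAFunctionReachability>(
  ///       QueryingAA, IRPosition::function(*CB.getFunction()),
  ///       DepClassTy::OPTIONAL);
  ///   bool MayReach = FnReachability.canReach(A, CB, Fn);
  /// \endcode
  /// A negative answer is optimistic and only becomes reliable once the
  /// fixpoint iteration has settled.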
  DenseMap<CallBase *, QuerySet> CBQueries;
};

/// ---------------------- Assumption Propagation ------------------------------
struct AAAssumptionInfoImpl : public AAAssumptionInfo {
  AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
                       const DenseSet<StringRef> &Known)
      : AAAssumptionInfo(IRP, A, Known) {}

  bool hasAssumption(const StringRef Assumption) const override {
    return isValidState() && setContains(Assumption);
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    const SetContents &Known = getKnown();
    const SetContents &Assumed = getAssumed();

    const std::string KnownStr =
        llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
    const std::string AssumedStr =
        (Assumed.isUniversal())
            ? "Universal"
            : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");

    return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
  }
};

/// Propagates assumption information from parent functions to all of their
/// successors. An assumption can be propagated if the containing function
/// dominates the called function.
///
/// We start with a "known" set of assumptions already valid for the associated
/// function and an "assumed" set that initially contains all possible
/// assumptions. The assumed set is inter-procedurally updated by narrowing its
/// contents as concrete values are known. The concrete values are seeded by
/// the first nodes that are either entries into the call graph or contain no
/// assumptions. Each node is updated as the intersection of the assumed state
/// with all of its predecessors.
struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
  AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A,
                             getAssumptions(*IRP.getAssociatedFunction())) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const auto &Assumptions = getKnown();

    // Don't manifest a universal set if it somehow made it here.
    if (Assumptions.isUniversal())
      return ChangeStatus::UNCHANGED;

    Function *AssociatedFunction = getAssociatedFunction();

    bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    bool Changed = false;

    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
          *this, IRPosition::callsite_function(*ACS.getInstruction()),
          DepClassTy::REQUIRED);
      // Get the set of assumptions shared by all of this function's callers.
      Changed |= getIntersection(AssumptionAA.getAssumed());
      return !getAssumed().empty() || !getKnown().empty();
    };

    bool AllCallSitesKnown;
    // Get the intersection of all assumptions held by this node's
    // predecessors. If we don't know all the call sites then this is either an
    // entry into the call graph or an empty node. This node is known to only
    // contain its own assumptions and can be propagated to its successors.
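    // For illustration (hypothetical assumption strings): if one caller's
    // assumed set is {"A", "B"} and another's is {"B", "C"}, the intersection
    // leaves this node with {"B"}, which its own callees will observe on
    // their next update.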
    if (!A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override {}
};

/// Assumption Info defined for call sites.
struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {

  AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
      : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // Don't manifest a universal set if it somehow made it here.
    if (getKnown().isUniversal())
      return ChangeStatus::UNCHANGED;

    CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
    bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());

    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
    auto &AssumptionAA =
        A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
    bool Changed = getIntersection(AssumptionAA.getAssumed());
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
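  ///
  /// For illustration (hypothetical assumption strings): for a call site
  /// carrying the attribute "llvm.assume"="A" whose callee is declared with
  /// "llvm.assume"="B", the initial known set would be {"A", "B"}.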
  DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
    const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
    auto Assumptions = getAssumptions(CB);
    if (Function *F = IRP.getAssociatedFunction())
      set_union(Assumptions, getAssumptions(*F));
    return Assumptions;
  }
};

} // namespace

AACallGraphNode *AACallEdgeIterator::operator*() const {
  return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
      &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
}

void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;
const char AACallEdges::ID = 0;
const char AAFunctionReachability::ID = 0;
const char AAPointerInfo::ID = 0;
const char AAAssumptionInfo::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
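//
// For illustration, manually expanding
//   CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// with the definitions below yields (roughly):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... the other invalid position kinds are handled the same way ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }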

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV