//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file
// for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<int> MaxPotentialValuesIterations(
    "attributor-max-potential-values-iterations", cl::Hidden,
    cl::desc(
        "Maximum number of iterations we keep dismantling potential values."),
    cl::init(64));
static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
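
// For example, STATS_DECLTRACK_ARG_ATTR(returned) expands to (roughly):
//
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }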

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AAInstanceInfo)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Checks if a type could have padding bytes.
static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!Ty->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80, alloc size: 128.
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  if (!isa<StructType>(Ty))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(Ty);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
    Type *ElTy = StructTy->getElementType(I);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(I))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
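
// For example, on a typical 64-bit target `{ i8, i32 }` is not densely packed
// (the i32 element is padded to offset 4), and neither is x86_fp80 (80 bits
// of data in a 128-bit alloc size), while `{ i32, i32 }` is.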

/// Get the pointer operand of a memory accessing instruction. If \p I is not
/// a memory accessing instruction, return nullptr. If \p AllowVolatile is set
/// to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to
/// build getelementptr instructions that traverse the natural type of \p Ptr
/// if possible. If that fails, the remaining offset is adjusted byte-wise,
/// hence through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
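
// As a sketch: for \p PtrElemTy == { i32, [4 x i32] } and \p Offset == 8,
// getGEPIndicesForOffset yields the indices 0, 1, 1 and consumes the whole
// offset, so a pointer %p is advanced via
//   %p.0.1.1 = getelementptr { i32, [4 x i32] }, ptr %p, i32 0, i32 1, i32 1
// followed only by the final cast to \p ResTy.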

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallSetVector<Value *, 8> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     AA::ValueScope S,
                                     SmallPtrSetImpl<Value *> *SeenObjects) {
  SmallPtrSet<Value *, 8> LocalSeenObjects;
  if (!SeenObjects)
    SeenObjects = &LocalSeenObjects;

  SmallVector<AA::ValueAndContext> Values;
  if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
                                    S, UsedAssumedInformation)) {
    Objects.insert(const_cast<Value *>(&Ptr));
    return true;
  }

  for (auto &VAC : Values) {
    Value *UO = getUnderlyingObject(VAC.getValue());
    if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
      if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
                                       VAC.getCtxI(), UsedAssumedInformation, S,
                                       SeenObjects))
        return false;
      continue;
    }
    Objects.insert(VAC.getValue());
  }
  return true;
}

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
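/// For example, with AANonNull as the AAType, \p S stays `nonnull` only if
/// every returned value is assumed `nonnull`, since the per-value states are
/// joined below via IntegerState::operator&.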
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
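/// For example, an argument can only keep an assumed `noundef` state if the
/// passed value at every known call site is `noundef`; if not all call sites
/// are known (checkForAllCallSites fails), a pessimistic fixpoint is forced.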
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call base context to argument "
                    << "position: " << Pos
                    << " CB Arg state: " << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
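/// For example, if the callee's returned position is known `nonnull`, the
/// call site returned position simply clamps its own state against that.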
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U     - Underlying use.
/// I     - The user of the \p U.
/// State - The state to be updated with the information from the use.
/// Returns true if the value should be tracked transitively.
///
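/// An AAType will typically inspect the user \p I and fold whatever the use
/// implies (e.g., a dereferencing access for AADereferenceable) into the
/// given state.
///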
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into its own state. Let ParentState_i be a
  // state to indicate the known information for an i-th branch instruction in
  // the context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  //   void f(int a, int b, int *ptr) {
  //     if (a)
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     else {
  //       if (b) {
  //         *ptr = 0;
  //       } else {
  //         *ptr = 1;
  //       }
  //     }
  //   }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // We do not delete the Accesses objects but need to destroy them still.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a set with the instruction as key.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED if the state changed, UNCHANGED otherwise.
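  /// If \p I already has an access in the target bin, the new access is
  /// merged into the existing one (Access::operator&=) and CHANGED is
  /// returned only if the merged access differs from the one stored before.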
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin, if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }

  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or all
    // instructions are executed by the initial thread only, we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load, for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };
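
    // For example, a global in addrspace(3) ("Shared") on an amdgcn target
    // has kernel lifetime; it cannot be live once the kernel has finished.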

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable
    // query to determine if we should look at reachability from the callee.
    // For certain pointers we know the lifetime and we do not have to step
    // into the callee to determine reachability as the pointer would be dead
    // in the callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the function containing the alloca is not recursive, the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning which does not
      // work yet if we have threading effects, or the access is complicated.
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if
    // that succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
                                    int64_t Offset, CallBase &CB,
                                    bool FromCallee = false) {
    using namespace AA::PointerInfo;
    if (!OtherAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
    bool IsByval =
        FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : OtherAAImpl.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (Offset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
      Accesses *Bin = AccessBins.lookup(OAS);
      for (const AAPointerInfo::Access &RAcc : *It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        AccessKind AK = RAcc.getKind();
        Optional<Value *> Content = RAcc.getContent();
        if (FromCallee) {
          Content = A.translateArgumentToCallSiteContent(
              RAcc.getContent(), CB, *this, UsedAssumedInformation);
          AK = AccessKind(
              AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
        }
        Changed =
            Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
                                AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}

  /// Dump the state into \p O.
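  /// Each bin is printed as "[offset-(offset + size)] : #accesses", e.g.,
  /// "[8-12] : 1" for a bin with a single 4-byte access at offset 8, followed
  /// by one line per access.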
  void dumpState(raw_ostream &O) {
    for (auto &It : AccessBins) {
      O << "[" << It.first.getOffset() << "-"
        << It.first.getOffset() + It.first.getSize()
        << "] : " << It.getSecond()->size() << "\n";
      for (auto &Acc : *It.getSecond()) {
        O << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
        if (Acc.getLocalInst() != Acc.getRemoteInst())
          O << "     --> " << *Acc.getRemoteInst() << "\n";
        if (!Acc.isWrittenValueYetUndetermined()) {
          if (Acc.getWrittenValue())
            O << "       - c: " << *Acc.getWrittenValue() << "\n";
          else
            O << "       - c: <unknown>\n";
        }
      }
    }
  }
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    Value &AssociatedValue = getAssociatedValue();

    const DataLayout &DL = A.getDataLayout();
    DenseMap<Value *, OffsetInfo> OffsetInfoMap;
    OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};

    auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
                                     bool &Follow) {
      OffsetInfo &UsrOI = OffsetInfoMap[Usr];
      UsrOI = PtrOI;
      Follow = true;
      return true;
    };

    const auto *TLI = getAnchorScope()
                          ? A.getInfoCache().getTargetLibraryInfoForFunction(
                                *getAnchorScope())
                          : nullptr;
    auto UsePred = [&](const Use &U, bool &Follow) -> bool {
      Value *CurPtr = U.get();
      User *Usr = U.getUser();
      LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
                        << *Usr << "\n");
      assert(OffsetInfoMap.count(CurPtr) &&
             "The current pointer offset should have been seeded!");

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
        if (CE->isCast())
          return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
        if (CE->isCompare())
          return true;
        if (!isa<GEPOperator>(CE)) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
                            << "\n");
          return false;
        }
      }
      if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        UsrOI = PtrOI;

        // TODO: Use range information.
        if (PtrOI.Offset == OffsetAndSize::Unknown ||
            !GEP->hasAllConstantIndices()) {
          UsrOI.Offset = OffsetAndSize::Unknown;
          Follow = true;
          return true;
        }

        SmallVector<Value *, 8> Indices;
        for (Use &Idx : GEP->indices()) {
          if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
            Indices.push_back(CIdx);
            continue;
          }

          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
                            << " : " << *Idx << "\n");
          return false;
        }
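        // All indices are constant, so accumulate the GEP offset; e.g., for
        // `%g = getelementptr i32, ptr %p, i64 4` with %p at offset 8, %g is
        // recorded at offset 8 + 4 * 4 = 24.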
        UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
                                          GEP->getSourceElementType(), Indices);
        Follow = true;
        return true;
      }
      if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
        return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);

      // For PHIs we need to take care of the recurrence explicitly as the
      // value might change while we iterate through a loop. For now, we give
      // up if the PHI is not invariant.
      if (isa<PHINode>(Usr)) {
        // Note the order here, the Usr access might change the map, CurPtr is
        // already in it though.
        OffsetInfo &UsrOI = OffsetInfoMap[Usr];
        OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
        // Check if the PHI is invariant (so far).
        if (UsrOI == PtrOI)
          return true;

        // Check if the PHI operand already has an unknown offset as we can't
        // improve on that anymore.
        if (PtrOI.Offset == OffsetAndSize::Unknown) {
          UsrOI = PtrOI;
          Follow = true;
          return true;
        }

        // Check if the PHI operand is not dependent on the PHI itself.
        // TODO: This is not great as we look at the pointer type. However, it
        // is unclear where the Offset size comes from with typeless pointers.
        APInt Offset(
            DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
            0);
        if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
                                    DL, Offset, /* AllowNonInbounds */ true)) {
          if (Offset != PtrOI.Offset) {
            LLVM_DEBUG(dbgs()
                       << "[AAPointerInfo] PHI operand pointer offset mismatch "
                       << *CurPtr << " in " << *Usr << "\n");
            return false;
          }
          return HandlePassthroughUser(Usr, PtrOI, Follow);
        }

        // TODO: Approximate in case we know the direction of the recurrence.
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
                          << *CurPtr << " in " << *Usr << "\n");
        UsrOI = PtrOI;
        UsrOI.Offset = OffsetAndSize::Unknown;
        Follow = true;
        return true;
      }

      if (auto *LoadI = dyn_cast<LoadInst>(Usr))
        return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
                            AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
                            Changed, LoadI->getType());
      if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
        if (StoreI->getValueOperand() == CurPtr) {
          LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
                            << *StoreI << "\n");
          return false;
        }
        bool UsedAssumedInformation = false;
        Optional<Value *> Content =
            A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
                                   UsedAssumedInformation, AA::Interprocedural);
        return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
                            OffsetInfoMap[CurPtr].Offset, Changed,
                            StoreI->getValueOperand()->getType());
      }
      if (auto *CB = dyn_cast<CallBase>(Usr)) {
        if (CB->isLifetimeStartOrEnd())
          return true;
        if (TLI && isFreeCall(CB, TLI))
          return true;
        if (CB->isArgOperand(&U)) {
          unsigned ArgNo = CB->getArgOperandNo(&U);
          const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
              *this, IRPosition::callsite_argument(*CB, ArgNo),
              DepClassTy::REQUIRED);
          Changed = translateAndAddState(A, CSArgPI,
                                         OffsetInfoMap[CurPtr].Offset, *CB) |
                    Changed;
          return true;
        }
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
                          << "\n");
        // TODO: Allow some call uses
        return false;
      }

      LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
      return false;
    };
    auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
      if (OffsetInfoMap.count(NewU)) {
        LLVM_DEBUG({
          if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
            dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
                   << OffsetInfoMap[NewU].Offset << " vs "
                   << OffsetInfoMap[OldU].Offset << "\n";
          }
        });
        return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
      }
      OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
      return true;
    };
    if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
                           /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
                           /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
      LLVM_DEBUG(
          dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
      return indicatePessimisticFixpoint();
    }

    LLVM_DEBUG({
      dbgs() << "Accesses by bin after update:\n";
      dumpState(dbgs());
    });

    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoReturned final : AAPointerInfoImpl {
  AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoArgument final : AAPointerInfoFloating {
  AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAPointerInfoFloating::initialize(A);
    if (getAnchorScope()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
  AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least the first (=
    // destination) and second (= source) arguments as we know how they are
    // accessed.
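    // For example, `memcpy(dst, src, 42)` adds a write access at offset 0
    // with size 42 for the destination argument and a matching read access
    // for the source argument.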
    if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      int64_t LengthVal = OffsetAndSize::Unknown;
      if (Length)
        LengthVal = Length->getSExtValue();
      Value &Ptr = getAssociatedValue();
      unsigned ArgNo = getIRPosition().getCallSiteArgNo();
      ChangeStatus Changed = ChangeStatus::UNCHANGED;
      if (ArgNo == 0) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
                     nullptr, LengthVal);
      } else if (ArgNo == 1) {
        handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
                     nullptr, LengthVal);
      } else {
        LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
                          << *MI << "\n");
        return indicatePessimisticFixpoint();
      }

      LLVM_DEBUG({
        dbgs() << "Accesses by bin after update:\n";
        dumpState(dbgs());
      });

      return Changed;
    }

    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA =
        A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
    return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
                                /* FromCallee */ true);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};

struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
  AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
  }
};
} // namespace

/// -----------------------NoUnwind Function Attribute--------------------------

namespace {
struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
            *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
                                   UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
1597 Function *F = getAssociatedFunction(); 1598 const IRPosition &FnPos = IRPosition::function(*F); 1599 auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED); 1600 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1601 } 1602 1603 /// See AbstractAttribute::trackStatistics() 1604 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); } 1605 }; 1606 } // namespace 1607 1608 /// --------------------- Function Return Values ------------------------------- 1609 1610 namespace { 1611 /// "Attribute" that collects all potential returned values and the return 1612 /// instructions that they arise from. 1613 /// 1614 /// If there is a unique returned value R, the manifest method will: 1615 /// - mark R with the "returned" attribute, if R is an argument. 1616 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState { 1617 1618 /// Mapping of values potentially returned by the associated function to the 1619 /// return instructions that might return them. 1620 MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues; 1621 1622 /// State flags 1623 /// 1624 ///{ 1625 bool IsFixed = false; 1626 bool IsValidState = true; 1627 ///} 1628 1629 public: 1630 AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A) 1631 : AAReturnedValues(IRP, A) {} 1632 1633 /// See AbstractAttribute::initialize(...). 1634 void initialize(Attributor &A) override { 1635 // Reset the state. 1636 IsFixed = false; 1637 IsValidState = true; 1638 ReturnedValues.clear(); 1639 1640 Function *F = getAssociatedFunction(); 1641 if (!F || F->isDeclaration()) { 1642 indicatePessimisticFixpoint(); 1643 return; 1644 } 1645 assert(!F->getReturnType()->isVoidTy() && 1646 "Did not expect a void return type!"); 1647 1648 // The map from instruction opcodes to those instructions in the function. 1649 auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F); 1650 1651 // Look through all arguments, if one is marked as returned we are done. 1652 for (Argument &Arg : F->args()) { 1653 if (Arg.hasReturnedAttr()) { 1654 auto &ReturnInstSet = ReturnedValues[&Arg]; 1655 if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret)) 1656 for (Instruction *RI : *Insts) 1657 ReturnInstSet.insert(cast<ReturnInst>(RI)); 1658 1659 indicateOptimisticFixpoint(); 1660 return; 1661 } 1662 } 1663 1664 if (!A.isFunctionIPOAmendable(*F)) 1665 indicatePessimisticFixpoint(); 1666 } 1667 1668 /// See AbstractAttribute::manifest(...). 1669 ChangeStatus manifest(Attributor &A) override; 1670 1671 /// See AbstractAttribute::getState(...). 1672 AbstractState &getState() override { return *this; } 1673 1674 /// See AbstractAttribute::getState(...). 1675 const AbstractState &getState() const override { return *this; } 1676 1677 /// See AbstractAttribute::updateImpl(Attributor &A). 1678 ChangeStatus updateImpl(Attributor &A) override; 1679 1680 llvm::iterator_range<iterator> returned_values() override { 1681 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1682 } 1683 1684 llvm::iterator_range<const_iterator> returned_values() const override { 1685 return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end()); 1686 } 1687 1688 /// Return the number of potential return values, -1 if unknown. 1689 size_t getNumReturnValues() const override { 1690 return isValidState() ? ReturnedValues.size() : -1; 1691 } 1692 1693 /// Return an assumed unique return value if a single candidate is found. If 1694 /// there cannot be one, return a nullptr. 
If it is not clear yet, return the 1695 /// Optional::NoneType. 1696 Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const; 1697 1698 /// See AbstractState::checkForAllReturnedValues(...). 1699 bool checkForAllReturnedValuesAndReturnInsts( 1700 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1701 const override; 1702 1703 /// Pretty print the attribute similar to the IR representation. 1704 const std::string getAsStr() const override; 1705 1706 /// See AbstractState::isAtFixpoint(). 1707 bool isAtFixpoint() const override { return IsFixed; } 1708 1709 /// See AbstractState::isValidState(). 1710 bool isValidState() const override { return IsValidState; } 1711 1712 /// See AbstractState::indicateOptimisticFixpoint(...). 1713 ChangeStatus indicateOptimisticFixpoint() override { 1714 IsFixed = true; 1715 return ChangeStatus::UNCHANGED; 1716 } 1717 1718 ChangeStatus indicatePessimisticFixpoint() override { 1719 IsFixed = true; 1720 IsValidState = false; 1721 return ChangeStatus::CHANGED; 1722 } 1723 }; 1724 1725 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) { 1726 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1727 1728 // Bookkeeping. 1729 assert(isValidState()); 1730 STATS_DECLTRACK(KnownReturnValues, FunctionReturn, 1731 "Number of function with known return values"); 1732 1733 // Check if we have an assumed unique return value that we could manifest. 1734 Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A); 1735 1736 if (!UniqueRV || !UniqueRV.value()) 1737 return Changed; 1738 1739 // Bookkeeping. 1740 STATS_DECLTRACK(UniqueReturnValue, FunctionReturn, 1741 "Number of function with unique return"); 1742 // If the assumed unique return value is an argument, annotate it. 1743 if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) { 1744 if (UniqueRVArg->getType()->canLosslesslyBitCastTo( 1745 getAssociatedFunction()->getReturnType())) { 1746 getIRPosition() = IRPosition::argument(*UniqueRVArg); 1747 Changed = IRAttribute::manifest(A); 1748 } 1749 } 1750 return Changed; 1751 } 1752 1753 const std::string AAReturnedValuesImpl::getAsStr() const { 1754 return (isAtFixpoint() ? "returns(#" : "may-return(#") + 1755 (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")"; 1756 } 1757 1758 Optional<Value *> 1759 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const { 1760 // If checkForAllReturnedValues provides a unique value, ignoring potential 1761 // undef values that can also be present, it is assumed to be the actual 1762 // return value and forwarded to the caller of this method. If there are 1763 // multiple, a nullptr is returned indicating there cannot be a unique 1764 // returned value. 1765 Optional<Value *> UniqueRV; 1766 Type *Ty = getAssociatedFunction()->getReturnType(); 1767 1768 auto Pred = [&](Value &RV) -> bool { 1769 UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty); 1770 return UniqueRV != Optional<Value *>(nullptr); 1771 }; 1772 1773 if (!A.checkForAllReturnedValues(Pred, *this)) 1774 UniqueRV = nullptr; 1775 1776 return UniqueRV; 1777 } 1778 1779 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts( 1780 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred) 1781 const { 1782 if (!isValidState()) 1783 return false; 1784 1785 // Check all returned values but ignore call sites as long as we have not 1786 // encountered an overdefined one during an update. 
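  // The loop below forwards each recorded (returned value, return
  // instructions) pair to the predicate; the first rejection aborts the walk.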
1787 for (auto &It : ReturnedValues) { 1788 Value *RV = It.first; 1789 if (!Pred(*RV, It.second)) 1790 return false; 1791 } 1792 1793 return true; 1794 } 1795 1796 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) { 1797 ChangeStatus Changed = ChangeStatus::UNCHANGED; 1798 1799 SmallVector<AA::ValueAndContext> Values; 1800 bool UsedAssumedInformation = false; 1801 auto ReturnInstCB = [&](Instruction &I) { 1802 ReturnInst &Ret = cast<ReturnInst>(I); 1803 Values.clear(); 1804 if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()), 1805 *this, Values, AA::Intraprocedural, 1806 UsedAssumedInformation)) 1807 Values.push_back({*Ret.getReturnValue(), Ret}); 1808 1809 for (auto &VAC : Values) { 1810 assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) && 1811 "Assumed returned value should be valid in function scope!"); 1812 if (ReturnedValues[VAC.getValue()].insert(&Ret)) 1813 Changed = ChangeStatus::CHANGED; 1814 } 1815 return true; 1816 }; 1817 1818 // Discover returned values from all live returned instructions in the 1819 // associated function. 1820 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret}, 1821 UsedAssumedInformation)) 1822 return indicatePessimisticFixpoint(); 1823 return Changed; 1824 } 1825 1826 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl { 1827 AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A) 1828 : AAReturnedValuesImpl(IRP, A) {} 1829 1830 /// See AbstractAttribute::trackStatistics() 1831 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) } 1832 }; 1833 1834 /// Returned values information for a call sites. 1835 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl { 1836 AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A) 1837 : AAReturnedValuesImpl(IRP, A) {} 1838 1839 /// See AbstractAttribute::initialize(...). 1840 void initialize(Attributor &A) override { 1841 // TODO: Once we have call site specific value information we can provide 1842 // call site specific liveness information and then it makes 1843 // sense to specialize attributes for call sites instead of 1844 // redirecting requests to the callee. 1845 llvm_unreachable("Abstract attributes for returned values are not " 1846 "supported for call sites yet!"); 1847 } 1848 1849 /// See AbstractAttribute::updateImpl(...). 1850 ChangeStatus updateImpl(Attributor &A) override { 1851 return indicatePessimisticFixpoint(); 1852 } 1853 1854 /// See AbstractAttribute::trackStatistics() 1855 void trackStatistics() const override {} 1856 }; 1857 } // namespace 1858 1859 /// ------------------------ NoSync Function Attribute ------------------------- 1860 1861 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) { 1862 if (!I->isAtomic()) 1863 return false; 1864 1865 if (auto *FI = dyn_cast<FenceInst>(I)) 1866 // All legal orderings for fence are stronger than monotonic. 1867 return FI->getSyncScopeID() != SyncScope::SingleThread; 1868 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) { 1869 // Unordered is not a legal ordering for cmpxchg. 
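    // Illustrative IR: `cmpxchg ... monotonic monotonic` is relaxed, while
    // any stronger success or failure ordering (e.g., `acq_rel monotonic`)
    // makes the instruction non-relaxed.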
1870 return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic || 1871 AI->getFailureOrdering() != AtomicOrdering::Monotonic); 1872 } 1873 1874 AtomicOrdering Ordering; 1875 switch (I->getOpcode()) { 1876 case Instruction::AtomicRMW: 1877 Ordering = cast<AtomicRMWInst>(I)->getOrdering(); 1878 break; 1879 case Instruction::Store: 1880 Ordering = cast<StoreInst>(I)->getOrdering(); 1881 break; 1882 case Instruction::Load: 1883 Ordering = cast<LoadInst>(I)->getOrdering(); 1884 break; 1885 default: 1886 llvm_unreachable( 1887 "New atomic operations need to be known in the attributor."); 1888 } 1889 1890 return (Ordering != AtomicOrdering::Unordered && 1891 Ordering != AtomicOrdering::Monotonic); 1892 } 1893 1894 /// Return true if this intrinsic is nosync. This is only used for intrinsics 1895 /// which would be nosync except that they have a volatile flag. All other 1896 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td. 1897 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) { 1898 if (auto *MI = dyn_cast<MemIntrinsic>(I)) 1899 return !MI->isVolatile(); 1900 return false; 1901 } 1902 1903 namespace { 1904 struct AANoSyncImpl : AANoSync { 1905 AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {} 1906 1907 const std::string getAsStr() const override { 1908 return getAssumed() ? "nosync" : "may-sync"; 1909 } 1910 1911 /// See AbstractAttribute::updateImpl(...). 1912 ChangeStatus updateImpl(Attributor &A) override; 1913 }; 1914 1915 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) { 1916 1917 auto CheckRWInstForNoSync = [&](Instruction &I) { 1918 return AA::isNoSyncInst(A, I, *this); 1919 }; 1920 1921 auto CheckForNoSync = [&](Instruction &I) { 1922 // At this point we handled all read/write effects and they are all 1923 // nosync, so they can be skipped. 1924 if (I.mayReadOrWriteMemory()) 1925 return true; 1926 1927 // non-convergent and readnone imply nosync. 1928 return !cast<CallBase>(I).isConvergent(); 1929 }; 1930 1931 bool UsedAssumedInformation = false; 1932 if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this, 1933 UsedAssumedInformation) || 1934 !A.checkForAllCallLikeInstructions(CheckForNoSync, *this, 1935 UsedAssumedInformation)) 1936 return indicatePessimisticFixpoint(); 1937 1938 return ChangeStatus::UNCHANGED; 1939 } 1940 1941 struct AANoSyncFunction final : public AANoSyncImpl { 1942 AANoSyncFunction(const IRPosition &IRP, Attributor &A) 1943 : AANoSyncImpl(IRP, A) {} 1944 1945 /// See AbstractAttribute::trackStatistics() 1946 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) } 1947 }; 1948 1949 /// NoSync attribute deduction for a call sites. 1950 struct AANoSyncCallSite final : AANoSyncImpl { 1951 AANoSyncCallSite(const IRPosition &IRP, Attributor &A) 1952 : AANoSyncImpl(IRP, A) {} 1953 1954 /// See AbstractAttribute::initialize(...). 1955 void initialize(Attributor &A) override { 1956 AANoSyncImpl::initialize(A); 1957 Function *F = getAssociatedFunction(); 1958 if (!F || F->isDeclaration()) 1959 indicatePessimisticFixpoint(); 1960 } 1961 1962 /// See AbstractAttribute::updateImpl(...). 1963 ChangeStatus updateImpl(Attributor &A) override { 1964 // TODO: Once we have call site specific value information we can provide 1965 // call site specific liveness information and then it makes 1966 // sense to specialize attributes for call sites arguments instead of 1967 // redirecting requests to the callee argument. 
1968 Function *F = getAssociatedFunction(); 1969 const IRPosition &FnPos = IRPosition::function(*F); 1970 auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED); 1971 return clampStateAndIndicateChange(getState(), FnAA.getState()); 1972 } 1973 1974 /// See AbstractAttribute::trackStatistics() 1975 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); } 1976 }; 1977 } // namespace 1978 1979 /// ------------------------ No-Free Attributes ---------------------------- 1980 1981 namespace { 1982 struct AANoFreeImpl : public AANoFree { 1983 AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {} 1984 1985 /// See AbstractAttribute::updateImpl(...). 1986 ChangeStatus updateImpl(Attributor &A) override { 1987 auto CheckForNoFree = [&](Instruction &I) { 1988 const auto &CB = cast<CallBase>(I); 1989 if (CB.hasFnAttr(Attribute::NoFree)) 1990 return true; 1991 1992 const auto &NoFreeAA = A.getAAFor<AANoFree>( 1993 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 1994 return NoFreeAA.isAssumedNoFree(); 1995 }; 1996 1997 bool UsedAssumedInformation = false; 1998 if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this, 1999 UsedAssumedInformation)) 2000 return indicatePessimisticFixpoint(); 2001 return ChangeStatus::UNCHANGED; 2002 } 2003 2004 /// See AbstractAttribute::getAsStr(). 2005 const std::string getAsStr() const override { 2006 return getAssumed() ? "nofree" : "may-free"; 2007 } 2008 }; 2009 2010 struct AANoFreeFunction final : public AANoFreeImpl { 2011 AANoFreeFunction(const IRPosition &IRP, Attributor &A) 2012 : AANoFreeImpl(IRP, A) {} 2013 2014 /// See AbstractAttribute::trackStatistics() 2015 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) } 2016 }; 2017 2018 /// NoFree attribute deduction for a call sites. 2019 struct AANoFreeCallSite final : AANoFreeImpl { 2020 AANoFreeCallSite(const IRPosition &IRP, Attributor &A) 2021 : AANoFreeImpl(IRP, A) {} 2022 2023 /// See AbstractAttribute::initialize(...). 2024 void initialize(Attributor &A) override { 2025 AANoFreeImpl::initialize(A); 2026 Function *F = getAssociatedFunction(); 2027 if (!F || F->isDeclaration()) 2028 indicatePessimisticFixpoint(); 2029 } 2030 2031 /// See AbstractAttribute::updateImpl(...). 2032 ChangeStatus updateImpl(Attributor &A) override { 2033 // TODO: Once we have call site specific value information we can provide 2034 // call site specific liveness information and then it makes 2035 // sense to specialize attributes for call sites arguments instead of 2036 // redirecting requests to the callee argument. 2037 Function *F = getAssociatedFunction(); 2038 const IRPosition &FnPos = IRPosition::function(*F); 2039 auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED); 2040 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2041 } 2042 2043 /// See AbstractAttribute::trackStatistics() 2044 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); } 2045 }; 2046 2047 /// NoFree attribute for floating values. 2048 struct AANoFreeFloating : AANoFreeImpl { 2049 AANoFreeFloating(const IRPosition &IRP, Attributor &A) 2050 : AANoFreeImpl(IRP, A) {} 2051 2052 /// See AbstractAttribute::trackStatistics() 2053 void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)} 2054 2055 /// See Abstract Attribute::updateImpl(...). 
2056 ChangeStatus updateImpl(Attributor &A) override { 2057 const IRPosition &IRP = getIRPosition(); 2058 2059 const auto &NoFreeAA = A.getAAFor<AANoFree>( 2060 *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL); 2061 if (NoFreeAA.isAssumedNoFree()) 2062 return ChangeStatus::UNCHANGED; 2063 2064 Value &AssociatedValue = getIRPosition().getAssociatedValue(); 2065 auto Pred = [&](const Use &U, bool &Follow) -> bool { 2066 Instruction *UserI = cast<Instruction>(U.getUser()); 2067 if (auto *CB = dyn_cast<CallBase>(UserI)) { 2068 if (CB->isBundleOperand(&U)) 2069 return false; 2070 if (!CB->isArgOperand(&U)) 2071 return true; 2072 unsigned ArgNo = CB->getArgOperandNo(&U); 2073 2074 const auto &NoFreeArg = A.getAAFor<AANoFree>( 2075 *this, IRPosition::callsite_argument(*CB, ArgNo), 2076 DepClassTy::REQUIRED); 2077 return NoFreeArg.isAssumedNoFree(); 2078 } 2079 2080 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) || 2081 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) { 2082 Follow = true; 2083 return true; 2084 } 2085 if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) || 2086 isa<ReturnInst>(UserI)) 2087 return true; 2088 2089 // Unknown user. 2090 return false; 2091 }; 2092 if (!A.checkForAllUses(Pred, *this, AssociatedValue)) 2093 return indicatePessimisticFixpoint(); 2094 2095 return ChangeStatus::UNCHANGED; 2096 } 2097 }; 2098 2099 /// NoFree attribute for a call site argument. 2100 struct AANoFreeArgument final : AANoFreeFloating { 2101 AANoFreeArgument(const IRPosition &IRP, Attributor &A) 2102 : AANoFreeFloating(IRP, A) {} 2103 2104 /// See AbstractAttribute::trackStatistics() 2105 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) } 2106 }; 2107 2108 /// NoFree attribute for call site arguments. 2109 struct AANoFreeCallSiteArgument final : AANoFreeFloating { 2110 AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A) 2111 : AANoFreeFloating(IRP, A) {} 2112 2113 /// See AbstractAttribute::updateImpl(...). 2114 ChangeStatus updateImpl(Attributor &A) override { 2115 // TODO: Once we have call site specific value information we can provide 2116 // call site specific liveness information and then it makes 2117 // sense to specialize attributes for call sites arguments instead of 2118 // redirecting requests to the callee argument. 2119 Argument *Arg = getAssociatedArgument(); 2120 if (!Arg) 2121 return indicatePessimisticFixpoint(); 2122 const IRPosition &ArgPos = IRPosition::argument(*Arg); 2123 auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED); 2124 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 2125 } 2126 2127 /// See AbstractAttribute::trackStatistics() 2128 void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}; 2129 }; 2130 2131 /// NoFree attribute for function return value. 2132 struct AANoFreeReturned final : AANoFreeFloating { 2133 AANoFreeReturned(const IRPosition &IRP, Attributor &A) 2134 : AANoFreeFloating(IRP, A) { 2135 llvm_unreachable("NoFree is not applicable to function returns!"); 2136 } 2137 2138 /// See AbstractAttribute::initialize(...). 2139 void initialize(Attributor &A) override { 2140 llvm_unreachable("NoFree is not applicable to function returns!"); 2141 } 2142 2143 /// See AbstractAttribute::updateImpl(...). 
2144 ChangeStatus updateImpl(Attributor &A) override { 2145 llvm_unreachable("NoFree is not applicable to function returns!"); 2146 } 2147 2148 /// See AbstractAttribute::trackStatistics() 2149 void trackStatistics() const override {} 2150 }; 2151 2152 /// NoFree attribute deduction for a call site return value. 2153 struct AANoFreeCallSiteReturned final : AANoFreeFloating { 2154 AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A) 2155 : AANoFreeFloating(IRP, A) {} 2156 2157 ChangeStatus manifest(Attributor &A) override { 2158 return ChangeStatus::UNCHANGED; 2159 } 2160 /// See AbstractAttribute::trackStatistics() 2161 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) } 2162 }; 2163 } // namespace 2164 2165 /// ------------------------ NonNull Argument Attribute ------------------------ 2166 namespace { 2167 static int64_t getKnownNonNullAndDerefBytesForUse( 2168 Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue, 2169 const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) { 2170 TrackUse = false; 2171 2172 const Value *UseV = U->get(); 2173 if (!UseV->getType()->isPointerTy()) 2174 return 0; 2175 2176 // We need to follow common pointer manipulation uses to the accesses they 2177 // feed into. We can try to be smart to avoid looking through things we do not 2178 // like for now, e.g., non-inbounds GEPs. 2179 if (isa<CastInst>(I)) { 2180 TrackUse = true; 2181 return 0; 2182 } 2183 2184 if (isa<GetElementPtrInst>(I)) { 2185 TrackUse = true; 2186 return 0; 2187 } 2188 2189 Type *PtrTy = UseV->getType(); 2190 const Function *F = I->getFunction(); 2191 bool NullPointerIsDefined = 2192 F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true; 2193 const DataLayout &DL = A.getInfoCache().getDL(); 2194 if (const auto *CB = dyn_cast<CallBase>(I)) { 2195 if (CB->isBundleOperand(U)) { 2196 if (RetainedKnowledge RK = getKnowledgeFromUse( 2197 U, {Attribute::NonNull, Attribute::Dereferenceable})) { 2198 IsNonNull |= 2199 (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined); 2200 return RK.ArgValue; 2201 } 2202 return 0; 2203 } 2204 2205 if (CB->isCallee(U)) { 2206 IsNonNull |= !NullPointerIsDefined; 2207 return 0; 2208 } 2209 2210 unsigned ArgNo = CB->getArgOperandNo(U); 2211 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 2212 // As long as we only use known information there is no need to track 2213 // dependences here. 2214 auto &DerefAA = 2215 A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE); 2216 IsNonNull |= DerefAA.isKnownNonNull(); 2217 return DerefAA.getKnownDereferenceableBytes(); 2218 } 2219 2220 Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I); 2221 if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile()) 2222 return 0; 2223 2224 int64_t Offset; 2225 const Value *Base = 2226 getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL); 2227 if (Base && Base == &AssociatedValue) { 2228 int64_t DerefBytes = Loc->Size.getValue() + Offset; 2229 IsNonNull |= !NullPointerIsDefined; 2230 return std::max(int64_t(0), DerefBytes); 2231 } 2232 2233 /// Corner case when an offset is 0. 
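  // E.g., a direct access `store i32 0, ptr %p` through the associated value
  // has base `%p` and offset 0, so the access size directly bounds the known
  // dereferenceable bytes (illustrative IR).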
2234 Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL, 2235 /*AllowNonInbounds*/ true); 2236 if (Base && Base == &AssociatedValue && Offset == 0) { 2237 int64_t DerefBytes = Loc->Size.getValue(); 2238 IsNonNull |= !NullPointerIsDefined; 2239 return std::max(int64_t(0), DerefBytes); 2240 } 2241 2242 return 0; 2243 } 2244 2245 struct AANonNullImpl : AANonNull { 2246 AANonNullImpl(const IRPosition &IRP, Attributor &A) 2247 : AANonNull(IRP, A), 2248 NullIsDefined(NullPointerIsDefined( 2249 getAnchorScope(), 2250 getAssociatedValue().getType()->getPointerAddressSpace())) {} 2251 2252 /// See AbstractAttribute::initialize(...). 2253 void initialize(Attributor &A) override { 2254 Value &V = *getAssociatedValue().stripPointerCasts(); 2255 if (!NullIsDefined && 2256 hasAttr({Attribute::NonNull, Attribute::Dereferenceable}, 2257 /* IgnoreSubsumingPositions */ false, &A)) { 2258 indicateOptimisticFixpoint(); 2259 return; 2260 } 2261 2262 if (isa<ConstantPointerNull>(V)) { 2263 indicatePessimisticFixpoint(); 2264 return; 2265 } 2266 2267 AANonNull::initialize(A); 2268 2269 bool CanBeNull, CanBeFreed; 2270 if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull, 2271 CanBeFreed)) { 2272 if (!CanBeNull) { 2273 indicateOptimisticFixpoint(); 2274 return; 2275 } 2276 } 2277 2278 if (isa<GlobalValue>(V)) { 2279 indicatePessimisticFixpoint(); 2280 return; 2281 } 2282 2283 if (Instruction *CtxI = getCtxI()) 2284 followUsesInMBEC(*this, A, getState(), *CtxI); 2285 } 2286 2287 /// See followUsesInMBEC 2288 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 2289 AANonNull::StateType &State) { 2290 bool IsNonNull = false; 2291 bool TrackUse = false; 2292 getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I, 2293 IsNonNull, TrackUse); 2294 State.setKnown(IsNonNull); 2295 return TrackUse; 2296 } 2297 2298 /// See AbstractAttribute::getAsStr(). 2299 const std::string getAsStr() const override { 2300 return getAssumed() ? "nonnull" : "may-null"; 2301 } 2302 2303 /// Flag to determine if the underlying value can be null and still allow 2304 /// valid accesses. 2305 const bool NullIsDefined; 2306 }; 2307 2308 /// NonNull attribute for a floating value. 2309 struct AANonNullFloating : public AANonNullImpl { 2310 AANonNullFloating(const IRPosition &IRP, Attributor &A) 2311 : AANonNullImpl(IRP, A) {} 2312 2313 /// See AbstractAttribute::updateImpl(...). 
2314 ChangeStatus updateImpl(Attributor &A) override { 2315 const DataLayout &DL = A.getDataLayout(); 2316 2317 bool Stripped; 2318 bool UsedAssumedInformation = false; 2319 SmallVector<AA::ValueAndContext> Values; 2320 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, 2321 AA::AnyScope, UsedAssumedInformation)) { 2322 Values.push_back({getAssociatedValue(), getCtxI()}); 2323 Stripped = false; 2324 } else { 2325 Stripped = Values.size() != 1 || 2326 Values.front().getValue() != &getAssociatedValue(); 2327 } 2328 2329 DominatorTree *DT = nullptr; 2330 AssumptionCache *AC = nullptr; 2331 InformationCache &InfoCache = A.getInfoCache(); 2332 if (const Function *Fn = getAnchorScope()) { 2333 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn); 2334 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn); 2335 } 2336 2337 AANonNull::StateType T; 2338 auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { 2339 const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V), 2340 DepClassTy::REQUIRED); 2341 if (!Stripped && this == &AA) { 2342 if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT)) 2343 T.indicatePessimisticFixpoint(); 2344 } else { 2345 // Use abstract attribute information. 2346 const AANonNull::StateType &NS = AA.getState(); 2347 T ^= NS; 2348 } 2349 return T.isValidState(); 2350 }; 2351 2352 for (const auto &VAC : Values) 2353 if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI())) 2354 return indicatePessimisticFixpoint(); 2355 2356 return clampStateAndIndicateChange(getState(), T); 2357 } 2358 2359 /// See AbstractAttribute::trackStatistics() 2360 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2361 }; 2362 2363 /// NonNull attribute for function return value. 2364 struct AANonNullReturned final 2365 : AAReturnedFromReturnedValues<AANonNull, AANonNull> { 2366 AANonNullReturned(const IRPosition &IRP, Attributor &A) 2367 : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {} 2368 2369 /// See AbstractAttribute::getAsStr(). 2370 const std::string getAsStr() const override { 2371 return getAssumed() ? "nonnull" : "may-null"; 2372 } 2373 2374 /// See AbstractAttribute::trackStatistics() 2375 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) } 2376 }; 2377 2378 /// NonNull attribute for function argument. 2379 struct AANonNullArgument final 2380 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> { 2381 AANonNullArgument(const IRPosition &IRP, Attributor &A) 2382 : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {} 2383 2384 /// See AbstractAttribute::trackStatistics() 2385 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) } 2386 }; 2387 2388 struct AANonNullCallSiteArgument final : AANonNullFloating { 2389 AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A) 2390 : AANonNullFloating(IRP, A) {} 2391 2392 /// See AbstractAttribute::trackStatistics() 2393 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) } 2394 }; 2395 2396 /// NonNull attribute for a call site return position. 
2397 struct AANonNullCallSiteReturned final 2398 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> { 2399 AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A) 2400 : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {} 2401 2402 /// See AbstractAttribute::trackStatistics() 2403 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) } 2404 }; 2405 } // namespace 2406 2407 /// ------------------------ No-Recurse Attributes ---------------------------- 2408 2409 namespace { 2410 struct AANoRecurseImpl : public AANoRecurse { 2411 AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {} 2412 2413 /// See AbstractAttribute::getAsStr() 2414 const std::string getAsStr() const override { 2415 return getAssumed() ? "norecurse" : "may-recurse"; 2416 } 2417 }; 2418 2419 struct AANoRecurseFunction final : AANoRecurseImpl { 2420 AANoRecurseFunction(const IRPosition &IRP, Attributor &A) 2421 : AANoRecurseImpl(IRP, A) {} 2422 2423 /// See AbstractAttribute::updateImpl(...). 2424 ChangeStatus updateImpl(Attributor &A) override { 2425 2426 // If all live call sites are known to be no-recurse, we are as well. 2427 auto CallSitePred = [&](AbstractCallSite ACS) { 2428 const auto &NoRecurseAA = A.getAAFor<AANoRecurse>( 2429 *this, IRPosition::function(*ACS.getInstruction()->getFunction()), 2430 DepClassTy::NONE); 2431 return NoRecurseAA.isKnownNoRecurse(); 2432 }; 2433 bool UsedAssumedInformation = false; 2434 if (A.checkForAllCallSites(CallSitePred, *this, true, 2435 UsedAssumedInformation)) { 2436 // If we know all call sites and all are known no-recurse, we are done. 2437 // If all known call sites, which might not be all that exist, are known 2438 // to be no-recurse, we are not done but we can continue to assume 2439 // no-recurse. If one of the call sites we have not visited will become 2440 // live, another update is triggered. 2441 if (!UsedAssumedInformation) 2442 indicateOptimisticFixpoint(); 2443 return ChangeStatus::UNCHANGED; 2444 } 2445 2446 const AAFunctionReachability &EdgeReachability = 2447 A.getAAFor<AAFunctionReachability>(*this, getIRPosition(), 2448 DepClassTy::REQUIRED); 2449 if (EdgeReachability.canReach(A, *getAnchorScope())) 2450 return indicatePessimisticFixpoint(); 2451 return ChangeStatus::UNCHANGED; 2452 } 2453 2454 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) } 2455 }; 2456 2457 /// NoRecurse attribute deduction for a call sites. 2458 struct AANoRecurseCallSite final : AANoRecurseImpl { 2459 AANoRecurseCallSite(const IRPosition &IRP, Attributor &A) 2460 : AANoRecurseImpl(IRP, A) {} 2461 2462 /// See AbstractAttribute::initialize(...). 2463 void initialize(Attributor &A) override { 2464 AANoRecurseImpl::initialize(A); 2465 Function *F = getAssociatedFunction(); 2466 if (!F || F->isDeclaration()) 2467 indicatePessimisticFixpoint(); 2468 } 2469 2470 /// See AbstractAttribute::updateImpl(...). 2471 ChangeStatus updateImpl(Attributor &A) override { 2472 // TODO: Once we have call site specific value information we can provide 2473 // call site specific liveness information and then it makes 2474 // sense to specialize attributes for call sites arguments instead of 2475 // redirecting requests to the callee argument. 
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};
} // namespace

/// -------------------- Undefined-Behavior Attributes ------------------------

namespace {
struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  /// Checks UB for memory accesses through a pointer as well as branches,
  /// calls, and returns.
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef states that a volatile store is not UB, so skip those.
      if (I.isVolatile() && I.mayWriteToMemory())
        return true;

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should give us.
      Value *PtrOp =
          const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.value();

      // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto *BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
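      // E.g., `br i1 undef, label %a, label %b` is recorded as known UB,
      // while a condition simplified to a concrete value lands in the
      // assumed-no-UB set (illustrative cases).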
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond || !*SimplifiedCond)
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute,
        // this callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        // (1) Not having a value means it is dead. (we can replace the value
        //     with undef)
        // (2) Simplified to undef. The argument violates the noundef
        //     attribute.
        // (3) Simplified to a null pointer where it is known to be nonnull.
        //     The argument is a poison value and violates the noundef
        //     attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA =
            A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        bool UsedAssumedInformation = false;
        Optional<Value *> SimplifiedVal =
            A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
                                   UsedAssumedInformation, AA::Interprocedural);
        if (UsedAssumedInformation)
          continue;
        if (SimplifiedVal && !SimplifiedVal.value())
          return true;
        if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.value()))
          continue;
        auto &NonNullAA =
            A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB = [&](Instruction &I) {
      auto &RI = cast<ReturnInst>(I);
      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified return value to continue.
      Optional<Value *> SimplifiedRetValue =
          stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
      if (!SimplifiedRetValue || !*SimplifiedRetValue)
        return true;

      // Check if a return instruction always causes UB or not.
      // Note: It is guaranteed that the returned position of the anchor
      //       scope has the noundef attribute when this is called.
      //       We also ensure the return position is not "assumed dead"
      //       because the returned value was then potentially simplified to
      //       `undef` in AAReturnedValues without removing the `noundef`
      //       attribute yet.

      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
      // (1) Returned value is known to be undef.
      // (2) The value is known to be a null pointer and the returned
      //     position has the nonnull attribute (because the returned value
      //     is poison).
      if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
        auto &NonNullAA = A.getAAFor<AANonNull>(
            *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }

      return true;
    };

    bool UsedAssumedInformation = false;
    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              UsedAssumedInformation,
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
                                      UsedAssumedInformation);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllInstructions(InspectReturnInstForUB, *this,
                                    {Instruction::Ret}, UsedAssumedInformation,
                                    /* CheckBBLivenessOnly */ true);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words: if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest is boilerplate
    // to ensure that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto *BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number).
  /// Hence, at some point, they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called during an update when we process an instruction \p I
  // that depends on a value \p V. One of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
                                         Instruction *I) {
    bool UsedAssumedInformation = false;
    Optional<Value *> SimplifiedV =
        A.getAssumedSimplified(IRPosition::value(*V), *this,
                               UsedAssumedInformation, AA::Interprocedural);
    if (!UsedAssumedInformation) {
      // Don't depend on assumed values.
      if (!SimplifiedV) {
        // If it is known (which we tested above) but it doesn't have a value,
        // then we can assume `undef` and hence the instruction is UB.
        KnownUBInsts.insert(I);
        return llvm::None;
      }
      if (!*SimplifiedV)
        return nullptr;
      V = *SimplifiedV;
    }
    if (isa<UndefValue>(V)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return V;
  }
};

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};
} // namespace

/// ------------------------ Will-Return Attributes ----------------------------

namespace {
// Helper function that checks whether a function has any cycle which we don't
// know to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
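// E.g., a loop like `for (int i = 0; i < 8; ++i)` has a constant maximum trip
// count and is bounded, whereas `while (*p) ++p;` has no computable maximum
// trip count and is treated as an unbounded cycle (illustrative C sources).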
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // every cycle is unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the
  // maximal SCCs; to detect whether there is a cycle, we only need to find
  // the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd();
         ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
    // Check for `mustprogress` in the scope and the associated function, which
    // might be different if this is a call site.
    if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
        (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
      return false;

    bool IsKnown;
    if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
      return IsKnown || !KnownOnly;
    return false;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
      return ChangeStatus::UNCHANGED;

    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA =
          A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    bool UsedAssumedInformation = false;
    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
                                           UsedAssumedInformation))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
2915 void initialize(Attributor &A) override { 2916 AAWillReturnImpl::initialize(A); 2917 2918 Function *F = getAnchorScope(); 2919 if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A)) 2920 indicatePessimisticFixpoint(); 2921 } 2922 2923 /// See AbstractAttribute::trackStatistics() 2924 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) } 2925 }; 2926 2927 /// WillReturn attribute deduction for a call sites. 2928 struct AAWillReturnCallSite final : AAWillReturnImpl { 2929 AAWillReturnCallSite(const IRPosition &IRP, Attributor &A) 2930 : AAWillReturnImpl(IRP, A) {} 2931 2932 /// See AbstractAttribute::initialize(...). 2933 void initialize(Attributor &A) override { 2934 AAWillReturnImpl::initialize(A); 2935 Function *F = getAssociatedFunction(); 2936 if (!F || !A.isFunctionIPOAmendable(*F)) 2937 indicatePessimisticFixpoint(); 2938 } 2939 2940 /// See AbstractAttribute::updateImpl(...). 2941 ChangeStatus updateImpl(Attributor &A) override { 2942 if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false)) 2943 return ChangeStatus::UNCHANGED; 2944 2945 // TODO: Once we have call site specific value information we can provide 2946 // call site specific liveness information and then it makes 2947 // sense to specialize attributes for call sites arguments instead of 2948 // redirecting requests to the callee argument. 2949 Function *F = getAssociatedFunction(); 2950 const IRPosition &FnPos = IRPosition::function(*F); 2951 auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED); 2952 return clampStateAndIndicateChange(getState(), FnAA.getState()); 2953 } 2954 2955 /// See AbstractAttribute::trackStatistics() 2956 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); } 2957 }; 2958 } // namespace 2959 2960 /// -------------------AAReachability Attribute-------------------------- 2961 2962 namespace { 2963 struct AAReachabilityImpl : AAReachability { 2964 AAReachabilityImpl(const IRPosition &IRP, Attributor &A) 2965 : AAReachability(IRP, A) {} 2966 2967 const std::string getAsStr() const override { 2968 // TODO: Return the number of reachable queries. 2969 return "reachable"; 2970 } 2971 2972 /// See AbstractAttribute::updateImpl(...). 2973 ChangeStatus updateImpl(Attributor &A) override { 2974 return ChangeStatus::UNCHANGED; 2975 } 2976 }; 2977 2978 struct AAReachabilityFunction final : public AAReachabilityImpl { 2979 AAReachabilityFunction(const IRPosition &IRP, Attributor &A) 2980 : AAReachabilityImpl(IRP, A) {} 2981 2982 /// See AbstractAttribute::trackStatistics() 2983 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); } 2984 }; 2985 } // namespace 2986 2987 /// ------------------------ NoAlias Argument Attribute ------------------------ 2988 2989 namespace { 2990 struct AANoAliasImpl : AANoAlias { 2991 AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) { 2992 assert(getAssociatedType()->isPointerTy() && 2993 "Noalias is a pointer attribute"); 2994 } 2995 2996 const std::string getAsStr() const override { 2997 return getAssumed() ? "noalias" : "may-alias"; 2998 } 2999 }; 3000 3001 /// NoAlias attribute for a floating value. 3002 struct AANoAliasFloating final : AANoAliasImpl { 3003 AANoAliasFloating(const IRPosition &IRP, Attributor &A) 3004 : AANoAliasImpl(IRP, A) {} 3005 3006 /// See AbstractAttribute::initialize(...). 
3007 void initialize(Attributor &A) override { 3008 AANoAliasImpl::initialize(A); 3009 Value *Val = &getAssociatedValue(); 3010 do { 3011 CastInst *CI = dyn_cast<CastInst>(Val); 3012 if (!CI) 3013 break; 3014 Value *Base = CI->getOperand(0); 3015 if (!Base->hasOneUse()) 3016 break; 3017 Val = Base; 3018 } while (true); 3019 3020 if (!Val->getType()->isPointerTy()) { 3021 indicatePessimisticFixpoint(); 3022 return; 3023 } 3024 3025 if (isa<AllocaInst>(Val)) 3026 indicateOptimisticFixpoint(); 3027 else if (isa<ConstantPointerNull>(Val) && 3028 !NullPointerIsDefined(getAnchorScope(), 3029 Val->getType()->getPointerAddressSpace())) 3030 indicateOptimisticFixpoint(); 3031 else if (Val != &getAssociatedValue()) { 3032 const auto &ValNoAliasAA = A.getAAFor<AANoAlias>( 3033 *this, IRPosition::value(*Val), DepClassTy::OPTIONAL); 3034 if (ValNoAliasAA.isKnownNoAlias()) 3035 indicateOptimisticFixpoint(); 3036 } 3037 } 3038 3039 /// See AbstractAttribute::updateImpl(...). 3040 ChangeStatus updateImpl(Attributor &A) override { 3041 // TODO: Implement this. 3042 return indicatePessimisticFixpoint(); 3043 } 3044 3045 /// See AbstractAttribute::trackStatistics() 3046 void trackStatistics() const override { 3047 STATS_DECLTRACK_FLOATING_ATTR(noalias) 3048 } 3049 }; 3050 3051 /// NoAlias attribute for an argument. 3052 struct AANoAliasArgument final 3053 : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> { 3054 using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>; 3055 AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 3056 3057 /// See AbstractAttribute::initialize(...). 3058 void initialize(Attributor &A) override { 3059 Base::initialize(A); 3060 // See callsite argument attribute and callee argument attribute. 3061 if (hasAttr({Attribute::ByVal})) 3062 indicateOptimisticFixpoint(); 3063 } 3064 3065 /// See AbstractAttribute::update(...). 3066 ChangeStatus updateImpl(Attributor &A) override { 3067 // We have to make sure no-alias on the argument does not break 3068 // synchronization when this is a callback argument, see also [1] below. 3069 // If synchronization cannot be affected, we delegate to the base updateImpl 3070 // function, otherwise we give up for now. 3071 3072 // If the function is no-sync, no-alias cannot break synchronization. 3073 const auto &NoSyncAA = 3074 A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()), 3075 DepClassTy::OPTIONAL); 3076 if (NoSyncAA.isAssumedNoSync()) 3077 return Base::updateImpl(A); 3078 3079 // If the argument is read-only, no-alias cannot break synchronization. 3080 bool IsKnown; 3081 if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown)) 3082 return Base::updateImpl(A); 3083 3084 // If the argument is never passed through callbacks, no-alias cannot break 3085 // synchronization. 3086 bool UsedAssumedInformation = false; 3087 if (A.checkForAllCallSites( 3088 [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this, 3089 true, UsedAssumedInformation)) 3090 return Base::updateImpl(A); 3091 3092 // TODO: add no-alias but make sure it doesn't break synchronization by 3093 // introducing fake uses. See: 3094 // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. 
Finkel, 3095 // International Workshop on OpenMP 2018, 3096 // http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf 3097 3098 return indicatePessimisticFixpoint(); 3099 } 3100 3101 /// See AbstractAttribute::trackStatistics() 3102 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) } 3103 }; 3104 3105 struct AANoAliasCallSiteArgument final : AANoAliasImpl { 3106 AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A) 3107 : AANoAliasImpl(IRP, A) {} 3108 3109 /// See AbstractAttribute::initialize(...). 3110 void initialize(Attributor &A) override { 3111 // See callsite argument attribute and callee argument attribute. 3112 const auto &CB = cast<CallBase>(getAnchorValue()); 3113 if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias)) 3114 indicateOptimisticFixpoint(); 3115 Value &Val = getAssociatedValue(); 3116 if (isa<ConstantPointerNull>(Val) && 3117 !NullPointerIsDefined(getAnchorScope(), 3118 Val.getType()->getPointerAddressSpace())) 3119 indicateOptimisticFixpoint(); 3120 } 3121 3122 /// Determine if the underlying value may alias with the call site argument 3123 /// \p OtherArgNo of \p ICS (= the underlying call site). 3124 bool mayAliasWithArgument(Attributor &A, AAResults *&AAR, 3125 const AAMemoryBehavior &MemBehaviorAA, 3126 const CallBase &CB, unsigned OtherArgNo) { 3127 // We do not need to worry about aliasing with the underlying IRP. 3128 if (this->getCalleeArgNo() == (int)OtherArgNo) 3129 return false; 3130 3131 // If it is not a pointer or pointer vector we do not alias. 3132 const Value *ArgOp = CB.getArgOperand(OtherArgNo); 3133 if (!ArgOp->getType()->isPtrOrPtrVectorTy()) 3134 return false; 3135 3136 auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>( 3137 *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE); 3138 3139 // If the argument is readnone, there is no read-write aliasing. 3140 if (CBArgMemBehaviorAA.isAssumedReadNone()) { 3141 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3142 return false; 3143 } 3144 3145 // If the argument is readonly and the underlying value is readonly, there 3146 // is no read-write aliasing. 3147 bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly(); 3148 if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) { 3149 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3150 A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL); 3151 return false; 3152 } 3153 3154 // We have to utilize actual alias analysis queries so we need the object. 3155 if (!AAR) 3156 AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope()); 3157 3158 // Try to rule it out at the call site. 3159 bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp); 3160 LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between " 3161 "callsite arguments: " 3162 << getAssociatedValue() << " " << *ArgOp << " => " 3163 << (IsAliasing ? "" : "no-") << "alias \n"); 3164 3165 return IsAliasing; 3166 } 3167 3168 bool 3169 isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR, 3170 const AAMemoryBehavior &MemBehaviorAA, 3171 const AANoAlias &NoAliasAA) { 3172 // We can deduce "noalias" if the following conditions hold. 3173 // (i) Associated value is assumed to be noalias in the definition. 3174 // (ii) Associated value is assumed to be no-capture in all the uses 3175 // possibly executed before this callsite. 3176 // (iii) There is no other pointer argument which could alias with the 3177 // value. 
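    // Illustrative example: for a call `foo(%m)` where `%m` stems from a
    // `noalias` definition (e.g., an allocation), is not captured before the
    // call, and cannot alias any other pointer argument of the call,
    // conditions (i)-(iii) hold and the argument can be marked noalias.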
3178 3179 bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias(); 3180 if (!AssociatedValueIsNoAliasAtDef) { 3181 LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue() 3182 << " is not no-alias at the definition\n"); 3183 return false; 3184 } 3185 3186 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) { 3187 const auto &DerefAA = A.getAAFor<AADereferenceable>( 3188 *this, IRPosition::value(*O), DepClassTy::OPTIONAL); 3189 return DerefAA.getAssumedDereferenceableBytes(); 3190 }; 3191 3192 A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL); 3193 3194 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3195 const Function *ScopeFn = VIRP.getAnchorScope(); 3196 auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE); 3197 // Check whether the value is captured in the scope using AANoCapture. 3198 // Look at CFG and check only uses possibly executed before this 3199 // callsite. 3200 auto UsePred = [&](const Use &U, bool &Follow) -> bool { 3201 Instruction *UserI = cast<Instruction>(U.getUser()); 3202 3203 // If UserI is the curr instruction and there is a single potential use of 3204 // the value in UserI we allow the use. 3205 // TODO: We should inspect the operands and allow those that cannot alias 3206 // with the value. 3207 if (UserI == getCtxI() && UserI->getNumOperands() == 1) 3208 return true; 3209 3210 if (ScopeFn) { 3211 if (auto *CB = dyn_cast<CallBase>(UserI)) { 3212 if (CB->isArgOperand(&U)) { 3213 3214 unsigned ArgNo = CB->getArgOperandNo(&U); 3215 3216 const auto &NoCaptureAA = A.getAAFor<AANoCapture>( 3217 *this, IRPosition::callsite_argument(*CB, ArgNo), 3218 DepClassTy::OPTIONAL); 3219 3220 if (NoCaptureAA.isAssumedNoCapture()) 3221 return true; 3222 } 3223 } 3224 3225 if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this)) 3226 return true; 3227 } 3228 3229 // TODO: We should track the capturing uses in AANoCapture but the problem 3230 // is CGSCC runs. For those we would need to "allow" AANoCapture for 3231 // a value in the module slice. 3232 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) { 3233 case UseCaptureKind::NO_CAPTURE: 3234 return true; 3235 case UseCaptureKind::MAY_CAPTURE: 3236 LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI 3237 << "\n"); 3238 return false; 3239 case UseCaptureKind::PASSTHROUGH: 3240 Follow = true; 3241 return true; 3242 } 3243 llvm_unreachable("unknown UseCaptureKind"); 3244 }; 3245 3246 if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) { 3247 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) { 3248 LLVM_DEBUG( 3249 dbgs() << "[AANoAliasCSArg] " << getAssociatedValue() 3250 << " cannot be noalias as it is potentially captured\n"); 3251 return false; 3252 } 3253 } 3254 A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL); 3255 3256 // Check there is no other pointer argument which could alias with the 3257 // value passed at this call site. 3258 // TODO: AbstractCallSite 3259 const auto &CB = cast<CallBase>(getAnchorValue()); 3260 for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++) 3261 if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo)) 3262 return false; 3263 3264 return true; 3265 } 3266 3267 /// See AbstractAttribute::updateImpl(...). 3268 ChangeStatus updateImpl(Attributor &A) override { 3269 // If the argument is readnone we are done as there are no accesses via the 3270 // argument. 
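    // As a hypothetical example, for a callee "void @f(ptr readnone %p)" the
    // pointer is never accessed through this position at all, so no
    // read-write aliasing can be introduced and "noalias" is trivially safe.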
3271 auto &MemBehaviorAA = 3272 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 3273 if (MemBehaviorAA.isAssumedReadNone()) { 3274 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 3275 return ChangeStatus::UNCHANGED; 3276 } 3277 3278 const IRPosition &VIRP = IRPosition::value(getAssociatedValue()); 3279 const auto &NoAliasAA = 3280 A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE); 3281 3282 AAResults *AAR = nullptr; 3283 if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA, 3284 NoAliasAA)) { 3285 LLVM_DEBUG( 3286 dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n"); 3287 return ChangeStatus::UNCHANGED; 3288 } 3289 3290 return indicatePessimisticFixpoint(); 3291 } 3292 3293 /// See AbstractAttribute::trackStatistics() 3294 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) } 3295 }; 3296 3297 /// NoAlias attribute for function return value. 3298 struct AANoAliasReturned final : AANoAliasImpl { 3299 AANoAliasReturned(const IRPosition &IRP, Attributor &A) 3300 : AANoAliasImpl(IRP, A) {} 3301 3302 /// See AbstractAttribute::initialize(...). 3303 void initialize(Attributor &A) override { 3304 AANoAliasImpl::initialize(A); 3305 Function *F = getAssociatedFunction(); 3306 if (!F || F->isDeclaration()) 3307 indicatePessimisticFixpoint(); 3308 } 3309 3310 /// See AbstractAttribute::updateImpl(...). 3311 virtual ChangeStatus updateImpl(Attributor &A) override { 3312 3313 auto CheckReturnValue = [&](Value &RV) -> bool { 3314 if (Constant *C = dyn_cast<Constant>(&RV)) 3315 if (C->isNullValue() || isa<UndefValue>(C)) 3316 return true; 3317 3318 /// For now, we can only deduce noalias if we have call sites. 3319 /// FIXME: add more support. 3320 if (!isa<CallBase>(&RV)) 3321 return false; 3322 3323 const IRPosition &RVPos = IRPosition::value(RV); 3324 const auto &NoAliasAA = 3325 A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED); 3326 if (!NoAliasAA.isAssumedNoAlias()) 3327 return false; 3328 3329 const auto &NoCaptureAA = 3330 A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED); 3331 return NoCaptureAA.isAssumedNoCaptureMaybeReturned(); 3332 }; 3333 3334 if (!A.checkForAllReturnedValues(CheckReturnValue, *this)) 3335 return indicatePessimisticFixpoint(); 3336 3337 return ChangeStatus::UNCHANGED; 3338 } 3339 3340 /// See AbstractAttribute::trackStatistics() 3341 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) } 3342 }; 3343 3344 /// NoAlias attribute deduction for a call site return value. 3345 struct AANoAliasCallSiteReturned final : AANoAliasImpl { 3346 AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A) 3347 : AANoAliasImpl(IRP, A) {} 3348 3349 /// See AbstractAttribute::initialize(...). 3350 void initialize(Attributor &A) override { 3351 AANoAliasImpl::initialize(A); 3352 Function *F = getAssociatedFunction(); 3353 if (!F || F->isDeclaration()) 3354 indicatePessimisticFixpoint(); 3355 } 3356 3357 /// See AbstractAttribute::updateImpl(...). 3358 ChangeStatus updateImpl(Attributor &A) override { 3359 // TODO: Once we have call site specific value information we can provide 3360 // call site specific liveness information and then it makes 3361 // sense to specialize attributes for call sites arguments instead of 3362 // redirecting requests to the callee argument. 
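    // Sketch of the delegation (assumed semantics): for a hypothetical call
    //   %r = call ptr @mk()
    // the "noalias" state of %r is clamped against the state deduced for the
    // returned position of @mk, as done below.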
3363 Function *F = getAssociatedFunction(); 3364 const IRPosition &FnPos = IRPosition::returned(*F); 3365 auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED); 3366 return clampStateAndIndicateChange(getState(), FnAA.getState()); 3367 } 3368 3369 /// See AbstractAttribute::trackStatistics() 3370 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); } 3371 }; 3372 } // namespace 3373 3374 /// -------------------AAIsDead Function Attribute----------------------- 3375 3376 namespace { 3377 struct AAIsDeadValueImpl : public AAIsDead { 3378 AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3379 3380 /// See AbstractAttribute::initialize(...). 3381 void initialize(Attributor &A) override { 3382 if (auto *Scope = getAnchorScope()) 3383 if (!A.isRunOn(*Scope)) 3384 indicatePessimisticFixpoint(); 3385 } 3386 3387 /// See AAIsDead::isAssumedDead(). 3388 bool isAssumedDead() const override { return isAssumed(IS_DEAD); } 3389 3390 /// See AAIsDead::isKnownDead(). 3391 bool isKnownDead() const override { return isKnown(IS_DEAD); } 3392 3393 /// See AAIsDead::isAssumedDead(BasicBlock *). 3394 bool isAssumedDead(const BasicBlock *BB) const override { return false; } 3395 3396 /// See AAIsDead::isKnownDead(BasicBlock *). 3397 bool isKnownDead(const BasicBlock *BB) const override { return false; } 3398 3399 /// See AAIsDead::isAssumedDead(Instruction *I). 3400 bool isAssumedDead(const Instruction *I) const override { 3401 return I == getCtxI() && isAssumedDead(); 3402 } 3403 3404 /// See AAIsDead::isKnownDead(Instruction *I). 3405 bool isKnownDead(const Instruction *I) const override { 3406 return isAssumedDead(I) && isKnownDead(); 3407 } 3408 3409 /// See AbstractAttribute::getAsStr(). 3410 virtual const std::string getAsStr() const override { 3411 return isAssumedDead() ? "assumed-dead" : "assumed-live"; 3412 } 3413 3414 /// Check if all uses are assumed dead. 3415 bool areAllUsesAssumedDead(Attributor &A, Value &V) { 3416 // Callers might not check the type, void has no uses. 3417 if (V.getType()->isVoidTy() || V.use_empty()) 3418 return true; 3419 3420 // If we replace a value with a constant there are no uses left afterwards. 3421 if (!isa<Constant>(V)) { 3422 if (auto *I = dyn_cast<Instruction>(&V)) 3423 if (!A.isRunOn(*I->getFunction())) 3424 return false; 3425 bool UsedAssumedInformation = false; 3426 Optional<Constant *> C = 3427 A.getAssumedConstant(V, *this, UsedAssumedInformation); 3428 if (!C || *C) 3429 return true; 3430 } 3431 3432 auto UsePred = [&](const Use &U, bool &Follow) { return false; }; 3433 // Explicitly set the dependence class to required because we want a long 3434 // chain of N dependent instructions to be considered live as soon as one is 3435 // without going through N update cycles. This is not required for 3436 // correctness. 3437 return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false, 3438 DepClassTy::REQUIRED, 3439 /* IgnoreDroppableUses */ false); 3440 } 3441 3442 /// Determine if \p I is assumed to be side-effect free. 
3443 bool isAssumedSideEffectFree(Attributor &A, Instruction *I) { 3444 if (!I || wouldInstructionBeTriviallyDead(I)) 3445 return true; 3446 3447 auto *CB = dyn_cast<CallBase>(I); 3448 if (!CB || isa<IntrinsicInst>(CB)) 3449 return false; 3450 3451 const IRPosition &CallIRP = IRPosition::callsite_function(*CB); 3452 const auto &NoUnwindAA = 3453 A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE); 3454 if (!NoUnwindAA.isAssumedNoUnwind()) 3455 return false; 3456 if (!NoUnwindAA.isKnownNoUnwind()) 3457 A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL); 3458 3459 bool IsKnown; 3460 return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown); 3461 } 3462 }; 3463 3464 struct AAIsDeadFloating : public AAIsDeadValueImpl { 3465 AAIsDeadFloating(const IRPosition &IRP, Attributor &A) 3466 : AAIsDeadValueImpl(IRP, A) {} 3467 3468 /// See AbstractAttribute::initialize(...). 3469 void initialize(Attributor &A) override { 3470 AAIsDeadValueImpl::initialize(A); 3471 3472 if (isa<UndefValue>(getAssociatedValue())) { 3473 indicatePessimisticFixpoint(); 3474 return; 3475 } 3476 3477 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3478 if (!isAssumedSideEffectFree(A, I)) { 3479 if (!isa_and_nonnull<StoreInst>(I)) 3480 indicatePessimisticFixpoint(); 3481 else 3482 removeAssumedBits(HAS_NO_EFFECT); 3483 } 3484 } 3485 3486 bool isDeadStore(Attributor &A, StoreInst &SI) { 3487 // Lang ref now states volatile store is not UB/dead, let's skip them. 3488 if (SI.isVolatile()) 3489 return false; 3490 3491 bool UsedAssumedInformation = false; 3492 SmallSetVector<Value *, 4> PotentialCopies; 3493 if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this, 3494 UsedAssumedInformation)) 3495 return false; 3496 return llvm::all_of(PotentialCopies, [&](Value *V) { 3497 return A.isAssumedDead(IRPosition::value(*V), this, nullptr, 3498 UsedAssumedInformation); 3499 }); 3500 } 3501 3502 /// See AbstractAttribute::getAsStr(). 3503 const std::string getAsStr() const override { 3504 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3505 if (isa_and_nonnull<StoreInst>(I)) 3506 if (isValidState()) 3507 return "assumed-dead-store"; 3508 return AAIsDeadValueImpl::getAsStr(); 3509 } 3510 3511 /// See AbstractAttribute::updateImpl(...). 3512 ChangeStatus updateImpl(Attributor &A) override { 3513 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 3514 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3515 if (!isDeadStore(A, *SI)) 3516 return indicatePessimisticFixpoint(); 3517 } else { 3518 if (!isAssumedSideEffectFree(A, I)) 3519 return indicatePessimisticFixpoint(); 3520 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3521 return indicatePessimisticFixpoint(); 3522 } 3523 return ChangeStatus::UNCHANGED; 3524 } 3525 3526 bool isRemovableStore() const override { 3527 return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue()); 3528 } 3529 3530 /// See AbstractAttribute::manifest(...). 3531 ChangeStatus manifest(Attributor &A) override { 3532 Value &V = getAssociatedValue(); 3533 if (auto *I = dyn_cast<Instruction>(&V)) { 3534 // If we get here we basically know the users are all dead. We check if 3535 // isAssumedSideEffectFree returns true here again because it might not be 3536 // the case and only the users are dead but the instruction (=call) is 3537 // still needed. 
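      // E.g., in the hypothetical snippet
      //   %unused = call i32 @printf(ptr @fmt)
      // all users of %unused may be dead while the call itself must remain,
      // so only stores and side-effect free instructions are deleted here.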
3538 if (isa<StoreInst>(I) || 3539 (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) { 3540 A.deleteAfterManifest(*I); 3541 return ChangeStatus::CHANGED; 3542 } 3543 } 3544 return ChangeStatus::UNCHANGED; 3545 } 3546 3547 /// See AbstractAttribute::trackStatistics() 3548 void trackStatistics() const override { 3549 STATS_DECLTRACK_FLOATING_ATTR(IsDead) 3550 } 3551 }; 3552 3553 struct AAIsDeadArgument : public AAIsDeadFloating { 3554 AAIsDeadArgument(const IRPosition &IRP, Attributor &A) 3555 : AAIsDeadFloating(IRP, A) {} 3556 3557 /// See AbstractAttribute::initialize(...). 3558 void initialize(Attributor &A) override { 3559 AAIsDeadFloating::initialize(A); 3560 if (!A.isFunctionIPOAmendable(*getAnchorScope())) 3561 indicatePessimisticFixpoint(); 3562 } 3563 3564 /// See AbstractAttribute::manifest(...). 3565 ChangeStatus manifest(Attributor &A) override { 3566 Argument &Arg = *getAssociatedArgument(); 3567 if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {})) 3568 if (A.registerFunctionSignatureRewrite( 3569 Arg, /* ReplacementTypes */ {}, 3570 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{}, 3571 Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) { 3572 return ChangeStatus::CHANGED; 3573 } 3574 return ChangeStatus::UNCHANGED; 3575 } 3576 3577 /// See AbstractAttribute::trackStatistics() 3578 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) } 3579 }; 3580 3581 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl { 3582 AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A) 3583 : AAIsDeadValueImpl(IRP, A) {} 3584 3585 /// See AbstractAttribute::initialize(...). 3586 void initialize(Attributor &A) override { 3587 AAIsDeadValueImpl::initialize(A); 3588 if (isa<UndefValue>(getAssociatedValue())) 3589 indicatePessimisticFixpoint(); 3590 } 3591 3592 /// See AbstractAttribute::updateImpl(...). 3593 ChangeStatus updateImpl(Attributor &A) override { 3594 // TODO: Once we have call site specific value information we can provide 3595 // call site specific liveness information and then it makes 3596 // sense to specialize attributes for call sites arguments instead of 3597 // redirecting requests to the callee argument. 3598 Argument *Arg = getAssociatedArgument(); 3599 if (!Arg) 3600 return indicatePessimisticFixpoint(); 3601 const IRPosition &ArgPos = IRPosition::argument(*Arg); 3602 auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED); 3603 return clampStateAndIndicateChange(getState(), ArgAA.getState()); 3604 } 3605 3606 /// See AbstractAttribute::manifest(...). 3607 ChangeStatus manifest(Attributor &A) override { 3608 CallBase &CB = cast<CallBase>(getAnchorValue()); 3609 Use &U = CB.getArgOperandUse(getCallSiteArgNo()); 3610 assert(!isa<UndefValue>(U.get()) && 3611 "Expected undef values to be filtered out!"); 3612 UndefValue &UV = *UndefValue::get(U->getType()); 3613 if (A.changeUseAfterManifest(U, UV)) 3614 return ChangeStatus::CHANGED; 3615 return ChangeStatus::UNCHANGED; 3616 } 3617 3618 /// See AbstractAttribute::trackStatistics() 3619 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) } 3620 }; 3621 3622 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating { 3623 AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A) 3624 : AAIsDeadFloating(IRP, A) {} 3625 3626 /// See AAIsDead::isAssumedDead(). 
3627 bool isAssumedDead() const override { 3628 return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree; 3629 } 3630 3631 /// See AbstractAttribute::initialize(...). 3632 void initialize(Attributor &A) override { 3633 AAIsDeadFloating::initialize(A); 3634 if (isa<UndefValue>(getAssociatedValue())) { 3635 indicatePessimisticFixpoint(); 3636 return; 3637 } 3638 3639 // We track this separately as a secondary state. 3640 IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI()); 3641 } 3642 3643 /// See AbstractAttribute::updateImpl(...). 3644 ChangeStatus updateImpl(Attributor &A) override { 3645 ChangeStatus Changed = ChangeStatus::UNCHANGED; 3646 if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) { 3647 IsAssumedSideEffectFree = false; 3648 Changed = ChangeStatus::CHANGED; 3649 } 3650 if (!areAllUsesAssumedDead(A, getAssociatedValue())) 3651 return indicatePessimisticFixpoint(); 3652 return Changed; 3653 } 3654 3655 /// See AbstractAttribute::trackStatistics() 3656 void trackStatistics() const override { 3657 if (IsAssumedSideEffectFree) 3658 STATS_DECLTRACK_CSRET_ATTR(IsDead) 3659 else 3660 STATS_DECLTRACK_CSRET_ATTR(UnusedResult) 3661 } 3662 3663 /// See AbstractAttribute::getAsStr(). 3664 const std::string getAsStr() const override { 3665 return isAssumedDead() 3666 ? "assumed-dead" 3667 : (getAssumed() ? "assumed-dead-users" : "assumed-live"); 3668 } 3669 3670 private: 3671 bool IsAssumedSideEffectFree = true; 3672 }; 3673 3674 struct AAIsDeadReturned : public AAIsDeadValueImpl { 3675 AAIsDeadReturned(const IRPosition &IRP, Attributor &A) 3676 : AAIsDeadValueImpl(IRP, A) {} 3677 3678 /// See AbstractAttribute::updateImpl(...). 3679 ChangeStatus updateImpl(Attributor &A) override { 3680 3681 bool UsedAssumedInformation = false; 3682 A.checkForAllInstructions([](Instruction &) { return true; }, *this, 3683 {Instruction::Ret}, UsedAssumedInformation); 3684 3685 auto PredForCallSite = [&](AbstractCallSite ACS) { 3686 if (ACS.isCallbackCall() || !ACS.getInstruction()) 3687 return false; 3688 return areAllUsesAssumedDead(A, *ACS.getInstruction()); 3689 }; 3690 3691 if (!A.checkForAllCallSites(PredForCallSite, *this, true, 3692 UsedAssumedInformation)) 3693 return indicatePessimisticFixpoint(); 3694 3695 return ChangeStatus::UNCHANGED; 3696 } 3697 3698 /// See AbstractAttribute::manifest(...). 3699 ChangeStatus manifest(Attributor &A) override { 3700 // TODO: Rewrite the signature to return void? 3701 bool AnyChange = false; 3702 UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType()); 3703 auto RetInstPred = [&](Instruction &I) { 3704 ReturnInst &RI = cast<ReturnInst>(I); 3705 if (!isa<UndefValue>(RI.getReturnValue())) 3706 AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV); 3707 return true; 3708 }; 3709 bool UsedAssumedInformation = false; 3710 A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret}, 3711 UsedAssumedInformation); 3712 return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 3713 } 3714 3715 /// See AbstractAttribute::trackStatistics() 3716 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) } 3717 }; 3718 3719 struct AAIsDeadFunction : public AAIsDead { 3720 AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {} 3721 3722 /// See AbstractAttribute::initialize(...). 
3723 void initialize(Attributor &A) override { 3724 Function *F = getAnchorScope(); 3725 if (!F || F->isDeclaration() || !A.isRunOn(*F)) { 3726 indicatePessimisticFixpoint(); 3727 return; 3728 } 3729 ToBeExploredFrom.insert(&F->getEntryBlock().front()); 3730 assumeLive(A, F->getEntryBlock()); 3731 } 3732 3733 /// See AbstractAttribute::getAsStr(). 3734 const std::string getAsStr() const override { 3735 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" + 3736 std::to_string(getAnchorScope()->size()) + "][#TBEP " + 3737 std::to_string(ToBeExploredFrom.size()) + "][#KDE " + 3738 std::to_string(KnownDeadEnds.size()) + "]"; 3739 } 3740 3741 /// See AbstractAttribute::manifest(...). 3742 ChangeStatus manifest(Attributor &A) override { 3743 assert(getState().isValidState() && 3744 "Attempted to manifest an invalid state!"); 3745 3746 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 3747 Function &F = *getAnchorScope(); 3748 3749 if (AssumedLiveBlocks.empty()) { 3750 A.deleteAfterManifest(F); 3751 return ChangeStatus::CHANGED; 3752 } 3753 3754 // Flag to determine if we can change an invoke to a call assuming the 3755 // callee is nounwind. This is not possible if the personality of the 3756 // function allows to catch asynchronous exceptions. 3757 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F); 3758 3759 KnownDeadEnds.set_union(ToBeExploredFrom); 3760 for (const Instruction *DeadEndI : KnownDeadEnds) { 3761 auto *CB = dyn_cast<CallBase>(DeadEndI); 3762 if (!CB) 3763 continue; 3764 const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>( 3765 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL); 3766 bool MayReturn = !NoReturnAA.isAssumedNoReturn(); 3767 if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB))) 3768 continue; 3769 3770 if (auto *II = dyn_cast<InvokeInst>(DeadEndI)) 3771 A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II)); 3772 else 3773 A.changeToUnreachableAfterManifest( 3774 const_cast<Instruction *>(DeadEndI->getNextNode())); 3775 HasChanged = ChangeStatus::CHANGED; 3776 } 3777 3778 STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted."); 3779 for (BasicBlock &BB : F) 3780 if (!AssumedLiveBlocks.count(&BB)) { 3781 A.deleteAfterManifest(BB); 3782 ++BUILD_STAT_NAME(AAIsDead, BasicBlock); 3783 HasChanged = ChangeStatus::CHANGED; 3784 } 3785 3786 return HasChanged; 3787 } 3788 3789 /// See AbstractAttribute::updateImpl(...). 3790 ChangeStatus updateImpl(Attributor &A) override; 3791 3792 bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override { 3793 assert(From->getParent() == getAnchorScope() && 3794 To->getParent() == getAnchorScope() && 3795 "Used AAIsDead of the wrong function"); 3796 return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To)); 3797 } 3798 3799 /// See AbstractAttribute::trackStatistics() 3800 void trackStatistics() const override {} 3801 3802 /// Returns true if the function is assumed dead. 3803 bool isAssumedDead() const override { return false; } 3804 3805 /// See AAIsDead::isKnownDead(). 3806 bool isKnownDead() const override { return false; } 3807 3808 /// See AAIsDead::isAssumedDead(BasicBlock *). 3809 bool isAssumedDead(const BasicBlock *BB) const override { 3810 assert(BB->getParent() == getAnchorScope() && 3811 "BB must be in the same anchor scope function."); 3812 3813 if (!getAssumed()) 3814 return false; 3815 return !AssumedLiveBlocks.count(BB); 3816 } 3817 3818 /// See AAIsDead::isKnownDead(BasicBlock *). 
3819   bool isKnownDead(const BasicBlock *BB) const override {
3820     return getKnown() && isAssumedDead(BB);
3821   }
3822 
3823   /// See AAIsDead::isAssumedDead(Instruction *I).
3824   bool isAssumedDead(const Instruction *I) const override {
3825     assert(I->getParent()->getParent() == getAnchorScope() &&
3826            "Instruction must be in the same anchor scope function.");
3827 
3828     if (!getAssumed())
3829       return false;
3830 
3831     // If it is not in AssumedLiveBlocks then it is dead for sure.
3832     // Otherwise, it can still be after a noreturn call in a live block.
3833     if (!AssumedLiveBlocks.count(I->getParent()))
3834       return true;
3835 
3836     // If it is not after a liveness barrier it is live.
3837     const Instruction *PrevI = I->getPrevNode();
3838     while (PrevI) {
3839       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3840         return true;
3841       PrevI = PrevI->getPrevNode();
3842     }
3843     return false;
3844   }
3845 
3846   /// See AAIsDead::isKnownDead(Instruction *I).
3847   bool isKnownDead(const Instruction *I) const override {
3848     return getKnown() && isAssumedDead(I);
3849   }
3850 
3851   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3852   /// that internal functions called from \p BB should now be looked at.
3853   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3854     if (!AssumedLiveBlocks.insert(&BB).second)
3855       return false;
3856 
3857     // We assume that all of BB is (probably) live now and if there are calls to
3858     // internal functions we will assume that those are now live as well. This
3859     // is a performance optimization for blocks with calls to a lot of internal
3860     // functions. It can however cause dead functions to be treated as live.
3861     for (const Instruction &I : BB)
3862       if (const auto *CB = dyn_cast<CallBase>(&I))
3863         if (const Function *F = CB->getCalledFunction())
3864           if (F->hasLocalLinkage())
3865             A.markLiveInternalFunction(*F);
3866     return true;
3867   }
3868 
3869   /// Collection of instructions that need to be explored again, e.g., we
3870   /// did assume they do not transfer control to (one of their) successors.
3871   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3872 
3873   /// Collection of instructions that are known to not transfer control.
3874   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3875 
3876   /// Collection of all assumed live edges.
3877   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3878 
3879   /// Collection of all assumed live BasicBlocks.
3880   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3881 };
3882 
3883 static bool
3884 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3885                         AbstractAttribute &AA,
3886                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3887   const IRPosition &IPos = IRPosition::callsite_function(CB);
3888 
3889   const auto &NoReturnAA =
3890       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3891   if (NoReturnAA.isAssumedNoReturn())
3892     return !NoReturnAA.isKnownNoReturn();
3893   if (CB.isTerminator())
3894     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3895   else
3896     AliveSuccessors.push_back(CB.getNextNode());
3897   return false;
3898 }
3899 
3900 static bool
3901 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3902                         AbstractAttribute &AA,
3903                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3904   bool UsedAssumedInformation =
3905       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3906 
3907   // First, determine if we can change an invoke to a call assuming the
3908   // callee is nounwind.
This is not possible if the personality of the 3909 // function allows to catch asynchronous exceptions. 3910 if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) { 3911 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3912 } else { 3913 const IRPosition &IPos = IRPosition::callsite_function(II); 3914 const auto &AANoUnw = 3915 A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL); 3916 if (AANoUnw.isAssumedNoUnwind()) { 3917 UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind(); 3918 } else { 3919 AliveSuccessors.push_back(&II.getUnwindDest()->front()); 3920 } 3921 } 3922 return UsedAssumedInformation; 3923 } 3924 3925 static bool 3926 identifyAliveSuccessors(Attributor &A, const BranchInst &BI, 3927 AbstractAttribute &AA, 3928 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3929 bool UsedAssumedInformation = false; 3930 if (BI.getNumSuccessors() == 1) { 3931 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3932 } else { 3933 Optional<Constant *> C = 3934 A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation); 3935 if (!C || isa_and_nonnull<UndefValue>(*C)) { 3936 // No value yet, assume both edges are dead. 3937 } else if (isa_and_nonnull<ConstantInt>(*C)) { 3938 const BasicBlock *SuccBB = 3939 BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue()); 3940 AliveSuccessors.push_back(&SuccBB->front()); 3941 } else { 3942 AliveSuccessors.push_back(&BI.getSuccessor(0)->front()); 3943 AliveSuccessors.push_back(&BI.getSuccessor(1)->front()); 3944 UsedAssumedInformation = false; 3945 } 3946 } 3947 return UsedAssumedInformation; 3948 } 3949 3950 static bool 3951 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI, 3952 AbstractAttribute &AA, 3953 SmallVectorImpl<const Instruction *> &AliveSuccessors) { 3954 bool UsedAssumedInformation = false; 3955 Optional<Constant *> C = 3956 A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation); 3957 if (!C || isa_and_nonnull<UndefValue>(C.value())) { 3958 // No value yet, assume all edges are dead. 3959 } else if (isa_and_nonnull<ConstantInt>(C.value())) { 3960 for (auto &CaseIt : SI.cases()) { 3961 if (CaseIt.getCaseValue() == C.value()) { 3962 AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front()); 3963 return UsedAssumedInformation; 3964 } 3965 } 3966 AliveSuccessors.push_back(&SI.getDefaultDest()->front()); 3967 return UsedAssumedInformation; 3968 } else { 3969 for (const BasicBlock *SuccBB : successors(SI.getParent())) 3970 AliveSuccessors.push_back(&SuccBB->front()); 3971 } 3972 return UsedAssumedInformation; 3973 } 3974 3975 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) { 3976 ChangeStatus Change = ChangeStatus::UNCHANGED; 3977 3978 LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/" 3979 << getAnchorScope()->size() << "] BBs and " 3980 << ToBeExploredFrom.size() << " exploration points and " 3981 << KnownDeadEnds.size() << " known dead ends\n"); 3982 3983 // Copy and clear the list of instructions we need to explore from. It is 3984 // refilled with instructions the next update has to look at. 
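  // Rough sketch of one exploration round (assumed semantics): pop an
  // instruction, fast-forward to the next call or terminator, and collect its
  // alive successors. For a hypothetical
  //   call void @maybe_dead()   ; currently assumed "noreturn"
  // no successor is recorded and the call stays an exploration point until
  // the "noreturn" assumption is either confirmed or dropped.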
3985   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3986                                                ToBeExploredFrom.end());
3987   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3988 
3989   SmallVector<const Instruction *, 8> AliveSuccessors;
3990   while (!Worklist.empty()) {
3991     const Instruction *I = Worklist.pop_back_val();
3992     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3993 
3994     // Fast forward for uninteresting instructions. We could look for UB here
3995     // though.
3996     while (!I->isTerminator() && !isa<CallBase>(I))
3997       I = I->getNextNode();
3998 
3999     AliveSuccessors.clear();
4000 
4001     bool UsedAssumedInformation = false;
4002     switch (I->getOpcode()) {
4003     // TODO: look for (assumed) UB to backwards propagate "deadness".
4004     default:
4005       assert(I->isTerminator() &&
4006              "Expected non-terminators to be handled already!");
4007       for (const BasicBlock *SuccBB : successors(I->getParent()))
4008         AliveSuccessors.push_back(&SuccBB->front());
4009       break;
4010     case Instruction::Call:
4011       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4012                                                        *this, AliveSuccessors);
4013       break;
4014     case Instruction::Invoke:
4015       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4016                                                        *this, AliveSuccessors);
4017       break;
4018     case Instruction::Br:
4019       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4020                                                        *this, AliveSuccessors);
4021       break;
4022     case Instruction::Switch:
4023       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4024                                                        *this, AliveSuccessors);
4025       break;
4026     }
4027 
4028     if (UsedAssumedInformation) {
4029       NewToBeExploredFrom.insert(I);
4030     } else if (AliveSuccessors.empty() ||
4031                (I->isTerminator() &&
4032                 AliveSuccessors.size() < I->getNumSuccessors())) {
4033       if (KnownDeadEnds.insert(I))
4034         Change = ChangeStatus::CHANGED;
4035     }
4036 
4037     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4038                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4039                       << UsedAssumedInformation << "\n");
4040 
4041     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4042       if (!I->isTerminator()) {
4043         assert(AliveSuccessors.size() == 1 &&
4044                "Non-terminator expected to have a single successor!");
4045         Worklist.push_back(AliveSuccessor);
4046       } else {
4047         // Record the assumed live edge.
4048         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4049         if (AssumedLiveEdges.insert(Edge).second)
4050           Change = ChangeStatus::CHANGED;
4051         if (assumeLive(A, *AliveSuccessor->getParent()))
4052           Worklist.push_back(AliveSuccessor);
4053       }
4054     }
4055   }
4056 
4057   // Check if the content of ToBeExploredFrom changed, ignoring the order.
4058   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4059       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4060         return !ToBeExploredFrom.count(I);
4061       })) {
4062     Change = ChangeStatus::CHANGED;
4063     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4064   }
4065 
4066   // If we know everything is live there is no need to query for liveness.
4067   // Instead, indicating a pessimistic fixpoint will cause the state to be
4068   // "invalid" and all queries to be answered conservatively without lookups.
4069   // To be in this state we have to (1) have finished the exploration, (2) not
4070   // have ruled any unreachable code dead, and (3) not have discovered any
4071   // non-trivial dead end.
4072   if (ToBeExploredFrom.empty() &&
4073       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4074       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4075         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4076       }))
4077     return indicatePessimisticFixpoint();
4078   return Change;
4079 }
4080 
4081 /// Liveness information for call sites.
4082 struct AAIsDeadCallSite final : AAIsDeadFunction {
4083   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4084       : AAIsDeadFunction(IRP, A) {}
4085 
4086   /// See AbstractAttribute::initialize(...).
4087   void initialize(Attributor &A) override {
4088     // TODO: Once we have call site specific value information we can provide
4089     // call site specific liveness information and then it makes
4090     // sense to specialize attributes for call sites instead of
4091     // redirecting requests to the callee.
4092     llvm_unreachable("Abstract attributes for liveness are not "
4093                      "supported for call sites yet!");
4094   }
4095 
4096   /// See AbstractAttribute::updateImpl(...).
4097   ChangeStatus updateImpl(Attributor &A) override {
4098     return indicatePessimisticFixpoint();
4099   }
4100 
4101   /// See AbstractAttribute::trackStatistics()
4102   void trackStatistics() const override {}
4103 };
4104 } // namespace
4105 
4106 /// -------------------- Dereferenceable Argument Attribute --------------------
4107 
4108 namespace {
4109 struct AADereferenceableImpl : AADereferenceable {
4110   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4111       : AADereferenceable(IRP, A) {}
4112   using StateType = DerefState;
4113 
4114   /// See AbstractAttribute::initialize(...).
4115   void initialize(Attributor &A) override {
4116     Value &V = *getAssociatedValue().stripPointerCasts();
4117     SmallVector<Attribute, 4> Attrs;
4118     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4119              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4120     for (const Attribute &Attr : Attrs)
4121       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4122 
4123     const IRPosition &IRP = this->getIRPosition();
4124     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4125 
4126     bool CanBeNull, CanBeFreed;
4127     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4128         A.getDataLayout(), CanBeNull, CanBeFreed));
4129 
4130     bool IsFnInterface = IRP.isFnInterfaceKind();
4131     Function *FnScope = IRP.getAnchorScope();
4132     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4133       indicatePessimisticFixpoint();
4134       return;
4135     }
4136 
4137     if (Instruction *CtxI = getCtxI())
4138       followUsesInMBEC(*this, A, getState(), *CtxI);
4139   }
4140 
4141   /// See AbstractAttribute::getState()
4142   /// {
4143   StateType &getState() override { return *this; }
4144   const StateType &getState() const override { return *this; }
4145   /// }
4146 
4147   /// Helper function for collecting accessed bytes in the must-be-executed context.
4148   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4149                               DerefState &State) {
4150     const Value *UseV = U->get();
4151     if (!UseV->getType()->isPointerTy())
4152       return;
4153 
4154     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4155     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4156       return;
4157 
4158     int64_t Offset;
4159     const Value *Base = GetPointerBaseWithConstantOffset(
4160         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4161     if (Base && Base == &getAssociatedValue())
4162       State.addAccessedBytes(Offset, Loc->Size.getValue());
4163   }
4164 
4165 
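  // For the helper above, a hypothetical use
  //   store i64 0, ptr %p
  // in the must-be-executed context records the bytes [0, 8) of %p as
  // accessed, which later feeds the known dereferenceable byte count.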
/// See followUsesInMBEC 4166 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4167 AADereferenceable::StateType &State) { 4168 bool IsNonNull = false; 4169 bool TrackUse = false; 4170 int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse( 4171 A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse); 4172 LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes 4173 << " for instruction " << *I << "\n"); 4174 4175 addAccessedBytesForUse(A, U, I, State); 4176 State.takeKnownDerefBytesMaximum(DerefBytes); 4177 return TrackUse; 4178 } 4179 4180 /// See AbstractAttribute::manifest(...). 4181 ChangeStatus manifest(Attributor &A) override { 4182 ChangeStatus Change = AADereferenceable::manifest(A); 4183 if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) { 4184 removeAttrs({Attribute::DereferenceableOrNull}); 4185 return ChangeStatus::CHANGED; 4186 } 4187 return Change; 4188 } 4189 4190 void getDeducedAttributes(LLVMContext &Ctx, 4191 SmallVectorImpl<Attribute> &Attrs) const override { 4192 // TODO: Add *_globally support 4193 if (isAssumedNonNull()) 4194 Attrs.emplace_back(Attribute::getWithDereferenceableBytes( 4195 Ctx, getAssumedDereferenceableBytes())); 4196 else 4197 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes( 4198 Ctx, getAssumedDereferenceableBytes())); 4199 } 4200 4201 /// See AbstractAttribute::getAsStr(). 4202 const std::string getAsStr() const override { 4203 if (!getAssumedDereferenceableBytes()) 4204 return "unknown-dereferenceable"; 4205 return std::string("dereferenceable") + 4206 (isAssumedNonNull() ? "" : "_or_null") + 4207 (isAssumedGlobal() ? "_globally" : "") + "<" + 4208 std::to_string(getKnownDereferenceableBytes()) + "-" + 4209 std::to_string(getAssumedDereferenceableBytes()) + ">"; 4210 } 4211 }; 4212 4213 /// Dereferenceable attribute for a floating value. 4214 struct AADereferenceableFloating : AADereferenceableImpl { 4215 AADereferenceableFloating(const IRPosition &IRP, Attributor &A) 4216 : AADereferenceableImpl(IRP, A) {} 4217 4218 /// See AbstractAttribute::updateImpl(...). 4219 ChangeStatus updateImpl(Attributor &A) override { 4220 4221 bool Stripped; 4222 bool UsedAssumedInformation = false; 4223 SmallVector<AA::ValueAndContext> Values; 4224 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, 4225 AA::AnyScope, UsedAssumedInformation)) { 4226 Values.push_back({getAssociatedValue(), getCtxI()}); 4227 Stripped = false; 4228 } else { 4229 Stripped = Values.size() != 1 || 4230 Values.front().getValue() != &getAssociatedValue(); 4231 } 4232 4233 const DataLayout &DL = A.getDataLayout(); 4234 DerefState T; 4235 4236 auto VisitValueCB = [&](const Value &V) -> bool { 4237 unsigned IdxWidth = 4238 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace()); 4239 APInt Offset(IdxWidth, 0); 4240 const Value *Base = stripAndAccumulateOffsets( 4241 A, *this, &V, DL, Offset, /* GetMinOffset */ false, 4242 /* AllowNonInbounds */ true); 4243 4244 const auto &AA = A.getAAFor<AADereferenceable>( 4245 *this, IRPosition::value(*Base), DepClassTy::REQUIRED); 4246 int64_t DerefBytes = 0; 4247 if (!Stripped && this == &AA) { 4248 // Use IR information if we did not strip anything. 4249 // TODO: track globally. 
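        // Worked example (hypothetical): if the stripped base carries
        // dereferenceable(16) and the visited value is
        //   %q = getelementptr inbounds i8, ptr %base, i64 4
        // then Offset is 4 and at most 16 - 4 = 12 bytes can be claimed for
        // %q, which is what the minimum taken below computes.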
4250         bool CanBeNull, CanBeFreed;
4251         DerefBytes =
4252             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4253         T.GlobalState.indicatePessimisticFixpoint();
4254       } else {
4255         const DerefState &DS = AA.getState();
4256         DerefBytes = DS.DerefBytesState.getAssumed();
4257         T.GlobalState &= DS.GlobalState;
4258       }
4259 
4260       // For now we do not try to "increase" dereferenceability due to negative
4261       // indices as we first have to come up with code to deal with loops and
4262       // with overflows of the dereferenceable bytes.
4263       int64_t OffsetSExt = Offset.getSExtValue();
4264       if (OffsetSExt < 0)
4265         OffsetSExt = 0;
4266 
4267       T.takeAssumedDerefBytesMinimum(
4268           std::max(int64_t(0), DerefBytes - OffsetSExt));
4269 
4270       if (this == &AA) {
4271         if (!Stripped) {
4272           // If nothing was stripped IR information is all we got.
4273           T.takeKnownDerefBytesMaximum(
4274               std::max(int64_t(0), DerefBytes - OffsetSExt));
4275           T.indicatePessimisticFixpoint();
4276         } else if (OffsetSExt > 0) {
4277           // If something was stripped but there is circular reasoning we look
4278           // at the offset. If it is positive we basically decrease the
4279           // dereferenceable bytes in a circular loop now, which will simply
4280           // drive them down to the known value in a very slow way which we
4281           // can accelerate.
4282           T.indicatePessimisticFixpoint();
4283         }
4284       }
4285 
4286       return T.isValidState();
4287     };
4288 
4289     for (const auto &VAC : Values)
4290       if (!VisitValueCB(*VAC.getValue()))
4291         return indicatePessimisticFixpoint();
4292 
4293     return clampStateAndIndicateChange(getState(), T);
4294   }
4295 
4296   /// See AbstractAttribute::trackStatistics()
4297   void trackStatistics() const override {
4298     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4299   }
4300 };
4301 
4302 /// Dereferenceable attribute for a return value.
4303 struct AADereferenceableReturned final
4304     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4305   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4306       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4307             IRP, A) {}
4308 
4309   /// See AbstractAttribute::trackStatistics()
4310   void trackStatistics() const override {
4311     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4312   }
4313 };
4314 
4315 /// Dereferenceable attribute for an argument.
4316 struct AADereferenceableArgument final
4317     : AAArgumentFromCallSiteArguments<AADereferenceable,
4318                                       AADereferenceableImpl> {
4319   using Base =
4320       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4321   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4322       : Base(IRP, A) {}
4323 
4324   /// See AbstractAttribute::trackStatistics()
4325   void trackStatistics() const override {
4326     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4327   }
4328 };
4329 
4330 /// Dereferenceable attribute for a call site argument.
4331 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4332   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4333       : AADereferenceableFloating(IRP, A) {}
4334 
4335   /// See AbstractAttribute::trackStatistics()
4336   void trackStatistics() const override {
4337     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4338   }
4339 };
4340 
4341 /// Dereferenceable attribute deduction for a call site return value.
4342 struct AADereferenceableCallSiteReturned final 4343 : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> { 4344 using Base = 4345 AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>; 4346 AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A) 4347 : Base(IRP, A) {} 4348 4349 /// See AbstractAttribute::trackStatistics() 4350 void trackStatistics() const override { 4351 STATS_DECLTRACK_CS_ATTR(dereferenceable); 4352 } 4353 }; 4354 } // namespace 4355 4356 // ------------------------ Align Argument Attribute ------------------------ 4357 4358 namespace { 4359 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA, 4360 Value &AssociatedValue, const Use *U, 4361 const Instruction *I, bool &TrackUse) { 4362 // We need to follow common pointer manipulation uses to the accesses they 4363 // feed into. 4364 if (isa<CastInst>(I)) { 4365 // Follow all but ptr2int casts. 4366 TrackUse = !isa<PtrToIntInst>(I); 4367 return 0; 4368 } 4369 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 4370 if (GEP->hasAllConstantIndices()) 4371 TrackUse = true; 4372 return 0; 4373 } 4374 4375 MaybeAlign MA; 4376 if (const auto *CB = dyn_cast<CallBase>(I)) { 4377 if (CB->isBundleOperand(U) || CB->isCallee(U)) 4378 return 0; 4379 4380 unsigned ArgNo = CB->getArgOperandNo(U); 4381 IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo); 4382 // As long as we only use known information there is no need to track 4383 // dependences here. 4384 auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE); 4385 MA = MaybeAlign(AlignAA.getKnownAlign()); 4386 } 4387 4388 const DataLayout &DL = A.getDataLayout(); 4389 const Value *UseV = U->get(); 4390 if (auto *SI = dyn_cast<StoreInst>(I)) { 4391 if (SI->getPointerOperand() == UseV) 4392 MA = SI->getAlign(); 4393 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 4394 if (LI->getPointerOperand() == UseV) 4395 MA = LI->getAlign(); 4396 } 4397 4398 if (!MA || *MA <= QueryingAA.getKnownAlign()) 4399 return 0; 4400 4401 unsigned Alignment = MA->value(); 4402 int64_t Offset; 4403 4404 if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) { 4405 if (Base == &AssociatedValue) { 4406 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4407 // So we can say that the maximum power of two which is a divisor of 4408 // gcd(Offset, Alignment) is an alignment. 4409 4410 uint32_t gcd = 4411 greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment); 4412 Alignment = llvm::PowerOf2Floor(gcd); 4413 } 4414 } 4415 4416 return Alignment; 4417 } 4418 4419 struct AAAlignImpl : AAAlign { 4420 AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {} 4421 4422 /// See AbstractAttribute::initialize(...). 4423 void initialize(Attributor &A) override { 4424 SmallVector<Attribute, 4> Attrs; 4425 getAttrs({Attribute::Alignment}, Attrs); 4426 for (const Attribute &Attr : Attrs) 4427 takeKnownMaximum(Attr.getValueAsInt()); 4428 4429 Value &V = *getAssociatedValue().stripPointerCasts(); 4430 takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value()); 4431 4432 if (getIRPosition().isFnInterfaceKind() && 4433 (!getAnchorScope() || 4434 !A.isFunctionIPOAmendable(*getAssociatedFunction()))) { 4435 indicatePessimisticFixpoint(); 4436 return; 4437 } 4438 4439 if (Instruction *CtxI = getCtxI()) 4440 followUsesInMBEC(*this, A, getState(), *CtxI); 4441 } 4442 4443 /// See AbstractAttribute::manifest(...). 
4444 ChangeStatus manifest(Attributor &A) override { 4445 ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED; 4446 4447 // Check for users that allow alignment annotations. 4448 Value &AssociatedValue = getAssociatedValue(); 4449 for (const Use &U : AssociatedValue.uses()) { 4450 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) { 4451 if (SI->getPointerOperand() == &AssociatedValue) 4452 if (SI->getAlign() < getAssumedAlign()) { 4453 STATS_DECLTRACK(AAAlign, Store, 4454 "Number of times alignment added to a store"); 4455 SI->setAlignment(getAssumedAlign()); 4456 LoadStoreChanged = ChangeStatus::CHANGED; 4457 } 4458 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) { 4459 if (LI->getPointerOperand() == &AssociatedValue) 4460 if (LI->getAlign() < getAssumedAlign()) { 4461 LI->setAlignment(getAssumedAlign()); 4462 STATS_DECLTRACK(AAAlign, Load, 4463 "Number of times alignment added to a load"); 4464 LoadStoreChanged = ChangeStatus::CHANGED; 4465 } 4466 } 4467 } 4468 4469 ChangeStatus Changed = AAAlign::manifest(A); 4470 4471 Align InheritAlign = 4472 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4473 if (InheritAlign >= getAssumedAlign()) 4474 return LoadStoreChanged; 4475 return Changed | LoadStoreChanged; 4476 } 4477 4478 // TODO: Provide a helper to determine the implied ABI alignment and check in 4479 // the existing manifest method and a new one for AAAlignImpl that value 4480 // to avoid making the alignment explicit if it did not improve. 4481 4482 /// See AbstractAttribute::getDeducedAttributes 4483 virtual void 4484 getDeducedAttributes(LLVMContext &Ctx, 4485 SmallVectorImpl<Attribute> &Attrs) const override { 4486 if (getAssumedAlign() > 1) 4487 Attrs.emplace_back( 4488 Attribute::getWithAlignment(Ctx, Align(getAssumedAlign()))); 4489 } 4490 4491 /// See followUsesInMBEC 4492 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 4493 AAAlign::StateType &State) { 4494 bool TrackUse = false; 4495 4496 unsigned int KnownAlign = 4497 getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse); 4498 State.takeKnownMaximum(KnownAlign); 4499 4500 return TrackUse; 4501 } 4502 4503 /// See AbstractAttribute::getAsStr(). 4504 const std::string getAsStr() const override { 4505 return "align<" + std::to_string(getKnownAlign().value()) + "-" + 4506 std::to_string(getAssumedAlign().value()) + ">"; 4507 } 4508 }; 4509 4510 /// Align attribute for a floating value. 4511 struct AAAlignFloating : AAAlignImpl { 4512 AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {} 4513 4514 /// See AbstractAttribute::updateImpl(...). 
4515 ChangeStatus updateImpl(Attributor &A) override { 4516 const DataLayout &DL = A.getDataLayout(); 4517 4518 bool Stripped; 4519 bool UsedAssumedInformation = false; 4520 SmallVector<AA::ValueAndContext> Values; 4521 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, 4522 AA::AnyScope, UsedAssumedInformation)) { 4523 Values.push_back({getAssociatedValue(), getCtxI()}); 4524 Stripped = false; 4525 } else { 4526 Stripped = Values.size() != 1 || 4527 Values.front().getValue() != &getAssociatedValue(); 4528 } 4529 4530 StateType T; 4531 auto VisitValueCB = [&](Value &V) -> bool { 4532 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V)) 4533 return true; 4534 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V), 4535 DepClassTy::REQUIRED); 4536 if (!Stripped && this == &AA) { 4537 int64_t Offset; 4538 unsigned Alignment = 1; 4539 if (const Value *Base = 4540 GetPointerBaseWithConstantOffset(&V, Offset, DL)) { 4541 // TODO: Use AAAlign for the base too. 4542 Align PA = Base->getPointerAlignment(DL); 4543 // BasePointerAddr + Offset = Alignment * Q for some integer Q. 4544 // So we can say that the maximum power of two which is a divisor of 4545 // gcd(Offset, Alignment) is an alignment. 4546 4547 uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), 4548 uint32_t(PA.value())); 4549 Alignment = llvm::PowerOf2Floor(gcd); 4550 } else { 4551 Alignment = V.getPointerAlignment(DL).value(); 4552 } 4553 // Use only IR information if we did not strip anything. 4554 T.takeKnownMaximum(Alignment); 4555 T.indicatePessimisticFixpoint(); 4556 } else { 4557 // Use abstract attribute information. 4558 const AAAlign::StateType &DS = AA.getState(); 4559 T ^= DS; 4560 } 4561 return T.isValidState(); 4562 }; 4563 4564 for (const auto &VAC : Values) { 4565 if (!VisitValueCB(*VAC.getValue())) 4566 return indicatePessimisticFixpoint(); 4567 } 4568 4569 // TODO: If we know we visited all incoming values, thus no are assumed 4570 // dead, we can take the known information from the state T. 4571 return clampStateAndIndicateChange(getState(), T); 4572 } 4573 4574 /// See AbstractAttribute::trackStatistics() 4575 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) } 4576 }; 4577 4578 /// Align attribute for function return value. 4579 struct AAAlignReturned final 4580 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> { 4581 using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>; 4582 AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 4583 4584 /// See AbstractAttribute::initialize(...). 4585 void initialize(Attributor &A) override { 4586 Base::initialize(A); 4587 Function *F = getAssociatedFunction(); 4588 if (!F || F->isDeclaration()) 4589 indicatePessimisticFixpoint(); 4590 } 4591 4592 /// See AbstractAttribute::trackStatistics() 4593 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) } 4594 }; 4595 4596 /// Align attribute for function argument. 4597 struct AAAlignArgument final 4598 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> { 4599 using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>; 4600 AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {} 4601 4602 /// See AbstractAttribute::manifest(...). 4603 ChangeStatus manifest(Attributor &A) override { 4604 // If the associated argument is involved in a must-tail call we give up 4605 // because we would need to keep the argument alignments of caller and 4606 // callee in-sync. 
Just does not seem worth the trouble right now. 4607 if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument())) 4608 return ChangeStatus::UNCHANGED; 4609 return Base::manifest(A); 4610 } 4611 4612 /// See AbstractAttribute::trackStatistics() 4613 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) } 4614 }; 4615 4616 struct AAAlignCallSiteArgument final : AAAlignFloating { 4617 AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A) 4618 : AAAlignFloating(IRP, A) {} 4619 4620 /// See AbstractAttribute::manifest(...). 4621 ChangeStatus manifest(Attributor &A) override { 4622 // If the associated argument is involved in a must-tail call we give up 4623 // because we would need to keep the argument alignments of caller and 4624 // callee in-sync. Just does not seem worth the trouble right now. 4625 if (Argument *Arg = getAssociatedArgument()) 4626 if (A.getInfoCache().isInvolvedInMustTailCall(*Arg)) 4627 return ChangeStatus::UNCHANGED; 4628 ChangeStatus Changed = AAAlignImpl::manifest(A); 4629 Align InheritAlign = 4630 getAssociatedValue().getPointerAlignment(A.getDataLayout()); 4631 if (InheritAlign >= getAssumedAlign()) 4632 Changed = ChangeStatus::UNCHANGED; 4633 return Changed; 4634 } 4635 4636 /// See AbstractAttribute::updateImpl(Attributor &A). 4637 ChangeStatus updateImpl(Attributor &A) override { 4638 ChangeStatus Changed = AAAlignFloating::updateImpl(A); 4639 if (Argument *Arg = getAssociatedArgument()) { 4640 // We only take known information from the argument 4641 // so we do not need to track a dependence. 4642 const auto &ArgAlignAA = A.getAAFor<AAAlign>( 4643 *this, IRPosition::argument(*Arg), DepClassTy::NONE); 4644 takeKnownMaximum(ArgAlignAA.getKnownAlign().value()); 4645 } 4646 return Changed; 4647 } 4648 4649 /// See AbstractAttribute::trackStatistics() 4650 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) } 4651 }; 4652 4653 /// Align attribute deduction for a call site return value. 4654 struct AAAlignCallSiteReturned final 4655 : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> { 4656 using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>; 4657 AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A) 4658 : Base(IRP, A) {} 4659 4660 /// See AbstractAttribute::initialize(...). 4661 void initialize(Attributor &A) override { 4662 Base::initialize(A); 4663 Function *F = getAssociatedFunction(); 4664 if (!F || F->isDeclaration()) 4665 indicatePessimisticFixpoint(); 4666 } 4667 4668 /// See AbstractAttribute::trackStatistics() 4669 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); } 4670 }; 4671 } // namespace 4672 4673 /// ------------------ Function No-Return Attribute ---------------------------- 4674 namespace { 4675 struct AANoReturnImpl : public AANoReturn { 4676 AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {} 4677 4678 /// See AbstractAttribute::initialize(...). 4679 void initialize(Attributor &A) override { 4680 AANoReturn::initialize(A); 4681 Function *F = getAssociatedFunction(); 4682 if (!F || F->isDeclaration()) 4683 indicatePessimisticFixpoint(); 4684 } 4685 4686 /// See AbstractAttribute::getAsStr(). 4687 const std::string getAsStr() const override { 4688 return getAssumed() ? "noreturn" : "may-return"; 4689 } 4690 4691 /// See AbstractAttribute::updateImpl(Attributor &A). 
4692   virtual ChangeStatus updateImpl(Attributor &A) override {
4693     auto CheckForNoReturn = [](Instruction &) { return false; };
4694     bool UsedAssumedInformation = false;
4695     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4696                                    {(unsigned)Instruction::Ret},
4697                                    UsedAssumedInformation))
4698       return indicatePessimisticFixpoint();
4699     return ChangeStatus::UNCHANGED;
4700   }
4701 };
4702 
4703 struct AANoReturnFunction final : AANoReturnImpl {
4704   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4705       : AANoReturnImpl(IRP, A) {}
4706 
4707   /// See AbstractAttribute::trackStatistics()
4708   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4709 };
4710 
4711 /// NoReturn attribute deduction for call sites.
4712 struct AANoReturnCallSite final : AANoReturnImpl {
4713   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4714       : AANoReturnImpl(IRP, A) {}
4715 
4716   /// See AbstractAttribute::initialize(...).
4717   void initialize(Attributor &A) override {
4718     AANoReturnImpl::initialize(A);
4719     if (Function *F = getAssociatedFunction()) {
4720       const IRPosition &FnPos = IRPosition::function(*F);
4721       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4722       if (!FnAA.isAssumedNoReturn())
4723         indicatePessimisticFixpoint();
4724     }
4725   }
4726 
4727   /// See AbstractAttribute::updateImpl(...).
4728   ChangeStatus updateImpl(Attributor &A) override {
4729     // TODO: Once we have call site specific value information we can provide
4730     // call site specific liveness information and then it makes
4731     // sense to specialize attributes for call site arguments instead of
4732     // redirecting requests to the callee argument.
4733     Function *F = getAssociatedFunction();
4734     const IRPosition &FnPos = IRPosition::function(*F);
4735     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4736     return clampStateAndIndicateChange(getState(), FnAA.getState());
4737   }
4738 
4739   /// See AbstractAttribute::trackStatistics()
4740   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4741 };
4742 } // namespace
4743 
4744 /// ----------------------- Instance Info ---------------------------------
4745 
4746 namespace {
4747 /// A class to hold the state of instance info attributes.
4748 struct AAInstanceInfoImpl : public AAInstanceInfo {
4749   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4750       : AAInstanceInfo(IRP, A) {}
4751 
4752   /// See AbstractAttribute::initialize(...).
4753   void initialize(Attributor &A) override {
4754     Value &V = getAssociatedValue();
4755     if (auto *C = dyn_cast<Constant>(&V)) {
4756       if (C->isThreadDependent())
4757         indicatePessimisticFixpoint();
4758       else
4759         indicateOptimisticFixpoint();
4760       return;
4761     }
4762     if (auto *CB = dyn_cast<CallBase>(&V))
4763       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4764           !CB->mayReadFromMemory()) {
4765         indicateOptimisticFixpoint();
4766         return;
4767       }
4768   }
4769 
4770   /// See AbstractAttribute::updateImpl(...).
4771 ChangeStatus updateImpl(Attributor &A) override {
4772 ChangeStatus Changed = ChangeStatus::UNCHANGED;
4773
4774 Value &V = getAssociatedValue();
4775 const Function *Scope = nullptr;
4776 if (auto *I = dyn_cast<Instruction>(&V))
4777 Scope = I->getFunction();
4778 if (auto *A = dyn_cast<Argument>(&V)) {
4779 Scope = A->getParent();
4780 if (!Scope->hasLocalLinkage())
4781 return Changed;
4782 }
4783 if (!Scope)
4784 return indicateOptimisticFixpoint();
4785
4786 auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4787 *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4788 if (NoRecurseAA.isAssumedNoRecurse())
4789 return Changed;
4790
4791 auto UsePred = [&](const Use &U, bool &Follow) {
4792 const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4793 if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4794 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4795 Follow = true;
4796 return true;
4797 }
4798 if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4799 (isa<StoreInst>(UserI) &&
4800 cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4801 return true;
4802 if (auto *CB = dyn_cast<CallBase>(UserI)) {
4803 // This check does not guarantee uniqueness, but for now it ensures we
4804 // cannot end up with two versions of \p U each believed to be the one.
4805 if (!CB->getCalledFunction() ||
4806 !CB->getCalledFunction()->hasLocalLinkage())
4807 return true;
4808 if (!CB->isArgOperand(&U))
4809 return false;
4810 const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4811 *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4812 DepClassTy::OPTIONAL);
4813 if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4814 return false;
4815 // If this call base might reach the scope again we might forward the
4816 // argument back here. This is very conservative.
4817 if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4818 return false;
4819 return true;
4820 }
4821 return false;
4822 };
4823
4824 auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4825 if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4826 auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4827 if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4828 return true;
4829 auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4830 *SI->getFunction());
4831 if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4832 return true;
4833 }
4834 return false;
4835 };
4836
4837 if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4838 DepClassTy::OPTIONAL,
4839 /* IgnoreDroppableUses */ true, EquivalentUseCB))
4840 return indicatePessimisticFixpoint();
4841
4842 return Changed;
4843 }
4844
4845 /// See AbstractState::getAsStr().
4846 const std::string getAsStr() const override {
4847 return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4848 }
4849
4850 /// See AbstractAttribute::trackStatistics()
4851 void trackStatistics() const override {}
4852 };
4853
4854 /// InstanceInfo attribute for floating values.
4855 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4856 AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4857 : AAInstanceInfoImpl(IRP, A) {}
4858 };
4859
4860 /// InstanceInfo attribute for function arguments.
4861 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4862 AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4863 : AAInstanceInfoFloating(IRP, A) {}
4864 };
4865
4866 /// InstanceInfo attribute for call site arguments.
4867 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4868 AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4869 : AAInstanceInfoImpl(IRP, A) {}
4870
4871 /// See AbstractAttribute::updateImpl(...).
4872 ChangeStatus updateImpl(Attributor &A) override {
4873 // TODO: Once we have call site specific value information we can provide
4874 // call site specific liveness information and then it makes
4875 // sense to specialize attributes for call site arguments instead of
4876 // redirecting requests to the callee argument.
4877 Argument *Arg = getAssociatedArgument();
4878 if (!Arg)
4879 return indicatePessimisticFixpoint();
4880 const IRPosition &ArgPos = IRPosition::argument(*Arg);
4881 auto &ArgAA =
4882 A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4883 return clampStateAndIndicateChange(getState(), ArgAA.getState());
4884 }
4885 };
4886
4887 /// InstanceInfo attribute for function return value.
4888 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4889 AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4890 : AAInstanceInfoImpl(IRP, A) {
4891 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4892 }
4893
4894 /// See AbstractAttribute::initialize(...).
4895 void initialize(Attributor &A) override {
4896 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4897 }
4898
4899 /// See AbstractAttribute::updateImpl(...).
4900 ChangeStatus updateImpl(Attributor &A) override {
4901 llvm_unreachable("InstanceInfo is not applicable to function returns!");
4902 }
4903 };
4904
4905 /// InstanceInfo attribute deduction for a call site return value.
4906 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4907 AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4908 : AAInstanceInfoFloating(IRP, A) {}
4909 };
4910 } // namespace
4911
4912 /// ----------------------- Variable Capturing ---------------------------------
4913
4914 namespace {
4915 /// A class to hold the state of no-capture attributes.
4916 struct AANoCaptureImpl : public AANoCapture {
4917 AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4918
4919 /// See AbstractAttribute::initialize(...).
4920 void initialize(Attributor &A) override {
4921 if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4922 indicateOptimisticFixpoint();
4923 return;
4924 }
4925 Function *AnchorScope = getAnchorScope();
4926 if (isFnInterfaceKind() &&
4927 (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4928 indicatePessimisticFixpoint();
4929 return;
4930 }
4931
4932 // You cannot "capture" null in the default address space.
4933 if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4934 getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4935 indicateOptimisticFixpoint();
4936 return;
4937 }
4938
4939 const Function *F =
4940 isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4941
4942 // Check what state the associated function can actually capture.
4943 if (F)
4944 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4945 else
4946 indicatePessimisticFixpoint();
4947 }
4948
4949 /// See AbstractAttribute::updateImpl(...).
4950 ChangeStatus updateImpl(Attributor &A) override;
4951
4952 /// See AbstractAttribute::getDeducedAttributes(...).
4953 virtual void
4954 getDeducedAttributes(LLVMContext &Ctx,
4955 SmallVectorImpl<Attribute> &Attrs) const override {
4956 if (!isAssumedNoCaptureMaybeReturned())
4957 return;
4958
4959 if (isArgumentPosition()) {
4960 if (isAssumedNoCapture())
4961 Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4962 else if (ManifestInternal)
4963 Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4964 }
4965 }
4966
4967 /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4968 /// depending on the ability of the function associated with \p IRP to capture
4969 /// state in memory and through "returning/throwing", respectively.
4970 static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4971 const Function &F,
4972 BitIntegerState &State) {
4973 // TODO: Once we have memory behavior attributes we should use them here.
4974
4975 // If we know we cannot communicate or write to memory, we do not care about
4976 // ptr2int anymore.
4977 if (F.onlyReadsMemory() && F.doesNotThrow() &&
4978 F.getReturnType()->isVoidTy()) {
4979 State.addKnownBits(NO_CAPTURE);
4980 return;
4981 }
4982
4983 // A function cannot capture state in memory if it only reads memory; it
4984 // can, however, return/throw state and the state might be influenced by the
4985 // pointer value, e.g., loading from a returned pointer might reveal a bit.
4986 if (F.onlyReadsMemory())
4987 State.addKnownBits(NOT_CAPTURED_IN_MEM);
4988
4989 // A function cannot communicate state back if it does not throw
4990 // exceptions and does not return values.
4991 if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4992 State.addKnownBits(NOT_CAPTURED_IN_RET);
4993
4994 // Check existing "returned" attributes.
4995 int ArgNo = IRP.getCalleeArgNo();
4996 if (F.doesNotThrow() && ArgNo >= 0) {
4997 for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4998 if (F.hasParamAttribute(u, Attribute::Returned)) {
4999 if (u == unsigned(ArgNo))
5000 State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5001 else if (F.onlyReadsMemory())
5002 State.addKnownBits(NO_CAPTURE);
5003 else
5004 State.addKnownBits(NOT_CAPTURED_IN_RET);
5005 break;
5006 }
5007 }
5008 }
5009
5010 /// See AbstractState::getAsStr().
5011 const std::string getAsStr() const override {
5012 if (isKnownNoCapture())
5013 return "known not-captured";
5014 if (isAssumedNoCapture())
5015 return "assumed not-captured";
5016 if (isKnownNoCaptureMaybeReturned())
5017 return "known not-captured-maybe-returned";
5018 if (isAssumedNoCaptureMaybeReturned())
5019 return "assumed not-captured-maybe-returned";
5020 return "assumed-captured";
5021 }
5022
5023 /// Check the use \p U and update \p State accordingly. Return true if we
5024 /// should continue to update the state.
5025 bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5026 bool &Follow) {
5027 Instruction *UInst = cast<Instruction>(U.getUser());
5028 LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5029 << *UInst << "\n");
5030
5031 // Deal with ptr2int by following uses.
5032 if (isa<PtrToIntInst>(UInst)) {
5033 LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5034 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5035 /* Return */ true);
5036 }
5037
5038 // For stores we already checked if we can follow them; if they make it
5039 // here, we give up.
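// E.g. (illustrative IR), a store that writes the pointer itself into
// memory, such as
//   store i8* %ptr, i8** @global
// publishes %ptr and thus captures it in memory.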
5040 if (isa<StoreInst>(UInst))
5041 return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5042 /* Return */ false);
5043
5044 // Explicitly catch return instructions.
5045 if (isa<ReturnInst>(UInst)) {
5046 if (UInst->getFunction() == getAnchorScope())
5047 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5048 /* Return */ true);
5049 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5050 /* Return */ true);
5051 }
5052
5053 // For now we only use special logic for call sites. However, the tracker
5054 // itself knows about a lot of other non-capturing cases already.
5055 auto *CB = dyn_cast<CallBase>(UInst);
5056 if (!CB || !CB->isArgOperand(&U))
5057 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5058 /* Return */ true);
5059
5060 unsigned ArgNo = CB->getArgOperandNo(&U);
5061 const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5062 // If we have an abstract no-capture attribute for the argument we can use
5063 // it to justify a non-capture attribute here. This allows recursion!
5064 auto &ArgNoCaptureAA =
5065 A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5066 if (ArgNoCaptureAA.isAssumedNoCapture())
5067 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5068 /* Return */ false);
5069 if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5070 Follow = true;
5071 return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5072 /* Return */ false);
5073 }
5074
5075 // Lastly, we could not find a reason no-capture can be assumed, so we do not assume it.
5076 return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5077 /* Return */ true);
5078 }
5079
5080 /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5081 /// \p CapturedInRet, then return true if we should continue updating the
5082 /// state.
5083 static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5084 bool CapturedInInt, bool CapturedInRet) {
5085 LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5086 << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5087 if (CapturedInMem)
5088 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5089 if (CapturedInInt)
5090 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5091 if (CapturedInRet)
5092 State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5093 return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5094 }
5095 };
5096
5097 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5098 const IRPosition &IRP = getIRPosition();
5099 Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5100 : &IRP.getAssociatedValue();
5101 if (!V)
5102 return indicatePessimisticFixpoint();
5103
5104 const Function *F =
5105 isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5106 assert(F && "Expected a function!");
5107 const IRPosition &FnPos = IRPosition::function(*F);
5108
5109 AANoCapture::StateType T;
5110
5111 // Readonly means we cannot capture through memory.
5112 bool IsKnown;
5113 if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5114 T.addKnownBits(NOT_CAPTURED_IN_MEM);
5115 if (IsKnown)
5116 addKnownBits(NOT_CAPTURED_IN_MEM);
5117 }
5118
5119 // Make sure all returned values are different from the underlying value.
5120 // TODO: we could do this in a more sophisticated way inside
5121 // AAReturnedValues, e.g., track all values that escape through returns
5122 // directly somehow.
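// The idea behind the check below, on an illustrative (hypothetical)
// example: in
//   define i8* @f(i8* %ours, i8* %other) { ret i8* %other }
// every returned value is an argument other than ours (or a single
// constant), so returning from @f cannot leak the value we reason about.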
5123 auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5124 if (!RVAA.getState().isValidState())
5125 return false;
5126 bool SeenConstant = false;
5127 for (auto &It : RVAA.returned_values()) {
5128 if (isa<Constant>(It.first)) {
5129 if (SeenConstant)
5130 return false;
5131 SeenConstant = true;
5132 } else if (!isa<Argument>(It.first) ||
5133 It.first == getAssociatedArgument())
5134 return false;
5135 }
5136 return true;
5137 };
5138
5139 const auto &NoUnwindAA =
5140 A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5141 if (NoUnwindAA.isAssumedNoUnwind()) {
5142 bool IsVoidTy = F->getReturnType()->isVoidTy();
5143 const AAReturnedValues *RVAA =
5144 IsVoidTy ? nullptr
5145 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5146 DepClassTy::OPTIONAL);
5147
5148 if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5149 T.addKnownBits(NOT_CAPTURED_IN_RET);
5150 if (T.isKnown(NOT_CAPTURED_IN_MEM))
5151 return ChangeStatus::UNCHANGED;
5152 if (NoUnwindAA.isKnownNoUnwind() &&
5153 (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5154 addKnownBits(NOT_CAPTURED_IN_RET);
5155 if (isKnown(NOT_CAPTURED_IN_MEM))
5156 return indicateOptimisticFixpoint();
5157 }
5158 }
5159 }
5160
5161 auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5162 const auto &DerefAA = A.getAAFor<AADereferenceable>(
5163 *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5164 return DerefAA.getAssumedDereferenceableBytes();
5165 };
5166
5167 auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5168 switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5169 case UseCaptureKind::NO_CAPTURE:
5170 return true;
5171 case UseCaptureKind::MAY_CAPTURE:
5172 return checkUse(A, T, U, Follow);
5173 case UseCaptureKind::PASSTHROUGH:
5174 Follow = true;
5175 return true;
5176 }
5177 llvm_unreachable("Unexpected use capture kind!");
5178 };
5179
5180 if (!A.checkForAllUses(UseCheck, *this, *V))
5181 return indicatePessimisticFixpoint();
5182
5183 AANoCapture::StateType &S = getState();
5184 auto Assumed = S.getAssumed();
5185 S.intersectAssumedBits(T.getAssumed());
5186 if (!isAssumedNoCaptureMaybeReturned())
5187 return indicatePessimisticFixpoint();
5188 return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5189 : ChangeStatus::CHANGED;
5190 }
5191
5192 /// NoCapture attribute for function arguments.
5193 struct AANoCaptureArgument final : AANoCaptureImpl {
5194 AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5195 : AANoCaptureImpl(IRP, A) {}
5196
5197 /// See AbstractAttribute::trackStatistics()
5198 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5199 };
5200
5201 /// NoCapture attribute for call site arguments.
5202 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5203 AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5204 : AANoCaptureImpl(IRP, A) {}
5205
5206 /// See AbstractAttribute::initialize(...).
5207 void initialize(Attributor &A) override {
5208 if (Argument *Arg = getAssociatedArgument())
5209 if (Arg->hasByValAttr())
5210 indicateOptimisticFixpoint();
5211 AANoCaptureImpl::initialize(A);
5212 }
5213
5214 /// See AbstractAttribute::updateImpl(...).
5215 ChangeStatus updateImpl(Attributor &A) override {
5216 // TODO: Once we have call site specific value information we can provide
5217 // call site specific liveness information and then it makes
5218 // sense to specialize attributes for call site arguments instead of
5219 // redirecting requests to the callee argument.
5220 Argument *Arg = getAssociatedArgument();
5221 if (!Arg)
5222 return indicatePessimisticFixpoint();
5223 const IRPosition &ArgPos = IRPosition::argument(*Arg);
5224 auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5225 return clampStateAndIndicateChange(getState(), ArgAA.getState());
5226 }
5227
5228 /// See AbstractAttribute::trackStatistics()
5229 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
5230 };
5231
5232 /// NoCapture attribute for floating values.
5233 struct AANoCaptureFloating final : AANoCaptureImpl {
5234 AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5235 : AANoCaptureImpl(IRP, A) {}
5236
5237 /// See AbstractAttribute::trackStatistics()
5238 void trackStatistics() const override {
5239 STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5240 }
5241 };
5242
5243 /// NoCapture attribute for function return value.
5244 struct AANoCaptureReturned final : AANoCaptureImpl {
5245 AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5246 : AANoCaptureImpl(IRP, A) {
5247 llvm_unreachable("NoCapture is not applicable to function returns!");
5248 }
5249
5250 /// See AbstractAttribute::initialize(...).
5251 void initialize(Attributor &A) override {
5252 llvm_unreachable("NoCapture is not applicable to function returns!");
5253 }
5254
5255 /// See AbstractAttribute::updateImpl(...).
5256 ChangeStatus updateImpl(Attributor &A) override {
5257 llvm_unreachable("NoCapture is not applicable to function returns!");
5258 }
5259
5260 /// See AbstractAttribute::trackStatistics()
5261 void trackStatistics() const override {}
5262 };
5263
5264 /// NoCapture attribute deduction for a call site return value.
5265 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5266 AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5267 : AANoCaptureImpl(IRP, A) {}
5268
5269 /// See AbstractAttribute::initialize(...).
5270 void initialize(Attributor &A) override {
5271 const Function *F = getAnchorScope();
5272 // Check what state the associated function can actually capture.
5273 determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5274 }
5275
5276 /// See AbstractAttribute::trackStatistics()
5277 void trackStatistics() const override {
5278 STATS_DECLTRACK_CSRET_ATTR(nocapture)
5279 }
5280 };
5281 } // namespace
5282
5283 /// ------------------ Value Simplify Attribute ----------------------------
5284
5285 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5286 // FIXME: Add typecast support.
5287 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5288 SimplifiedAssociatedValue, Other, Ty);
5289 if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5290 return false;
5291
5292 LLVM_DEBUG({
5293 if (SimplifiedAssociatedValue)
5294 dbgs() << "[ValueSimplify] is assumed to be "
5295 << **SimplifiedAssociatedValue << "\n";
5296 else
5297 dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5298 });
5299 return true;
5300 }
5301
5302 namespace {
5303 struct AAValueSimplifyImpl : AAValueSimplify {
5304 AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5305 : AAValueSimplify(IRP, A) {}
5306
5307 /// See AbstractAttribute::initialize(...).
5308 void initialize(Attributor &A) override {
5309 if (getAssociatedValue().getType()->isVoidTy())
5310 indicatePessimisticFixpoint();
5311 if (A.hasSimplificationCallback(getIRPosition()))
5312 indicatePessimisticFixpoint();
5313 }
5314
5315 /// See AbstractAttribute::getAsStr().
5316 const std::string getAsStr() const override {
5317 LLVM_DEBUG({
5318 dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5319 if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5320 dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5321 });
5322 return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5323 : "not-simple";
5324 }
5325
5326 /// See AbstractAttribute::trackStatistics()
5327 void trackStatistics() const override {}
5328
5329 /// See AAValueSimplify::getAssumedSimplifiedValue()
5330 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5331 return SimplifiedAssociatedValue;
5332 }
5333
5334 /// Ensure the return value is \p V with type \p Ty; if that is not possible,
5335 /// return nullptr. If \p Check is true we will only verify such an operation
5336 /// would succeed and return a non-nullptr value if that is the case. No IR
5337 /// is generated or modified.
5338 static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5339 bool Check) {
5340 if (auto *TypedV = AA::getWithType(V, Ty))
5341 return TypedV;
5342 if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5343 return Check ? &V
5344 : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5345 "", CtxI);
5346 return nullptr;
5347 }
5348
5349 /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
5350 /// If \p Check is true we will only verify such an operation would succeed and
5351 /// return a non-nullptr value if that is the case. No IR is generated or
5352 /// modified.
5353 static Value *reproduceInst(Attributor &A,
5354 const AbstractAttribute &QueryingAA,
5355 Instruction &I, Type &Ty, Instruction *CtxI,
5356 bool Check, ValueToValueMapTy &VMap) {
5357 assert(CtxI && "Cannot reproduce an instruction without context!");
5358 if (Check && (I.mayReadFromMemory() ||
5359 !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5360 /* TLI */ nullptr)))
5361 return nullptr;
5362 for (Value *Op : I.operands()) {
5363 Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5364 if (!NewOp) {
5365 assert(Check && "Manifest of new value unexpectedly failed!");
5366 return nullptr;
5367 }
5368 if (!Check)
5369 VMap[Op] = NewOp;
5370 }
5371 if (Check)
5372 return &I;
5373
5374 Instruction *CloneI = I.clone();
5375 // TODO: Try to salvage debug information here.
5376 CloneI->setDebugLoc(DebugLoc());
5377 VMap[&I] = CloneI;
5378 CloneI->insertBefore(CtxI);
5379 RemapInstruction(CloneI, VMap);
5380 return CloneI;
5381 }
5382
5383 /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
5384 /// If \p Check is true we will only verify such an operation would succeed and
5385 /// return a non-nullptr value if that is the case. No IR is generated or
5386 /// modified.
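///
/// Illustrative sketch: if the associated value simplifies to an instruction
/// such as `%add = add i32 %a, 8` defined elsewhere, it can be recreated in
/// front of \p CtxI via reproduceInst, provided it is safe to speculate and
/// all its operands can themselves be reproduced there.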
5387 static Value *reproduceValue(Attributor &A,
5388 const AbstractAttribute &QueryingAA, Value &V,
5389 Type &Ty, Instruction *CtxI, bool Check,
5390 ValueToValueMapTy &VMap) {
5391 if (const auto &NewV = VMap.lookup(&V))
5392 return NewV;
5393 bool UsedAssumedInformation = false;
5394 Optional<Value *> SimpleV = A.getAssumedSimplified(
5395 V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5396 if (!SimpleV.has_value())
5397 return PoisonValue::get(&Ty);
5398 Value *EffectiveV = &V;
5399 if (SimpleV.value())
5400 EffectiveV = SimpleV.value();
5401 if (auto *C = dyn_cast<Constant>(EffectiveV))
5402 return C;
5403 if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5404 A.getInfoCache()))
5405 return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5406 if (auto *I = dyn_cast<Instruction>(EffectiveV))
5407 if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5408 return ensureType(A, *NewV, Ty, CtxI, Check);
5409 return nullptr;
5410 }
5411
5412 /// Return a value we can use as replacement for the associated one, or
5413 /// nullptr if we don't have one that makes sense.
5414 Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5415 Value *NewV = SimplifiedAssociatedValue
5416 ? SimplifiedAssociatedValue.value()
5417 : UndefValue::get(getAssociatedType());
5418 if (NewV && NewV != &getAssociatedValue()) {
5419 ValueToValueMapTy VMap;
5420 // First verify we can reproduce the value with the required type at the
5421 // context location before we actually start modifying the IR.
5422 if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5423 /* CheckOnly */ true, VMap))
5424 return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5425 /* CheckOnly */ false, VMap);
5426 }
5427 return nullptr;
5428 }
5429
5430 /// Helper function for querying AAValueSimplify and updating the candidate.
5431 /// \param IRP The value position we are trying to unify with SimplifiedValue
5432 bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5433 const IRPosition &IRP, bool Simplify = true) {
5434 bool UsedAssumedInformation = false;
5435 Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5436 if (Simplify)
5437 QueryingValueSimplified = A.getAssumedSimplified(
5438 IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5439 return unionAssumed(QueryingValueSimplified);
5440 }
5441
5442 /// Return true if a candidate was found, false otherwise.
5443 template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5444 if (!getAssociatedValue().getType()->isIntegerTy())
5445 return false;
5446
5447 // This will also pass the call base context.
5448 const auto &AA =
5449 A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5450
5451 Optional<Constant *> COpt = AA.getAssumedConstant(A);
5452
5453 if (!COpt) {
5454 SimplifiedAssociatedValue = llvm::None;
5455 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5456 return true;
5457 }
5458 if (auto *C = *COpt) {
5459 SimplifiedAssociatedValue = C;
5460 A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5461 return true;
5462 }
5463 return false;
5464 }
5465
5466 bool askSimplifiedValueForOtherAAs(Attributor &A) {
5467 if (askSimplifiedValueFor<AAValueConstantRange>(A))
5468 return true;
5469 if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5470 return true;
5471 return false;
5472 }
5473
5474 /// See AbstractAttribute::manifest(...).
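///
/// Note on the insertion-point adjustment below: a use in a PHI node must be
/// materialized at the end of the matching incoming block, e.g.
/// (illustrative), for `%phi = phi i32 [ %v, %bb ]` a replacement for the
/// `%v` use is created before the terminator of %bb, not before the PHI, to
/// keep the IR valid.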
5475 ChangeStatus manifest(Attributor &A) override {
5476 ChangeStatus Changed = ChangeStatus::UNCHANGED;
5477 for (auto &U : getAssociatedValue().uses()) {
5478 // Check if we need to adjust the insertion point to make sure the IR is
5479 // valid.
5480 Instruction *IP = dyn_cast<Instruction>(U.getUser());
5481 if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5482 IP = PHI->getIncomingBlock(U)->getTerminator();
5483 if (auto *NewV = manifestReplacementValue(A, IP)) {
5484 LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5485 << " -> " << *NewV << " :: " << *this << "\n");
5486 if (A.changeUseAfterManifest(U, *NewV))
5487 Changed = ChangeStatus::CHANGED;
5488 }
5489 }
5490
5491 return Changed | AAValueSimplify::manifest(A);
5492 }
5493
5494 /// See AbstractState::indicatePessimisticFixpoint(...).
5495 ChangeStatus indicatePessimisticFixpoint() override {
5496 SimplifiedAssociatedValue = &getAssociatedValue();
5497 return AAValueSimplify::indicatePessimisticFixpoint();
5498 }
5499 };
5500
5501 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5502 AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5503 : AAValueSimplifyImpl(IRP, A) {}
5504
5505 void initialize(Attributor &A) override {
5506 AAValueSimplifyImpl::initialize(A);
5507 if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5508 indicatePessimisticFixpoint();
5509 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5510 Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5511 /* IgnoreSubsumingPositions */ true))
5512 indicatePessimisticFixpoint();
5513 }
5514
5515 /// See AbstractAttribute::updateImpl(...).
5516 ChangeStatus updateImpl(Attributor &A) override {
5517 // Byval is only replaceable if it is readonly; otherwise we would write
5518 // into the replaced value and not the copy that byval creates implicitly.
5519 Argument *Arg = getAssociatedArgument();
5520 if (Arg->hasByValAttr()) {
5521 // TODO: We probably need to verify synchronization is not an issue, e.g.,
5522 // there is no race by not copying a constant byval.
5523 bool IsKnown;
5524 if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5525 return indicatePessimisticFixpoint();
5526 }
5527
5528 auto Before = SimplifiedAssociatedValue;
5529
5530 auto PredForCallSite = [&](AbstractCallSite ACS) {
5531 const IRPosition &ACSArgPos =
5532 IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5533 // Check if a corresponding argument was found or if it is one not
5534 // associated (which can happen for callback calls).
5535 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5536 return false;
5537
5538 // Simplify the argument operand explicitly and check if the result is
5539 // valid in the current scope. This avoids referring to simplified values
5540 // in other functions, e.g., we don't want to say an argument in a
5541 // static function is actually an argument in a different function.
5542 bool UsedAssumedInformation = false;
5543 Optional<Constant *> SimpleArgOp =
5544 A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5545 if (!SimpleArgOp)
5546 return true;
5547 if (!SimpleArgOp.value())
5548 return false;
5549 if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5550 return false;
5551 return unionAssumed(*SimpleArgOp);
5552 };
5553
5554 // Generate an answer specific to a call site context.
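// E.g. (illustrative), with a call base context `call i32 @f(i32 7)` and an
// argument position in @f, only that one call site needs to be queried
// instead of all call sites of @f.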
5555 bool Success;
5556 bool UsedAssumedInformation = false;
5557 if (hasCallBaseContext() &&
5558 getCallBaseContext()->getCalledFunction() == Arg->getParent())
5559 Success = PredForCallSite(
5560 AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5561 else
5562 Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5563 UsedAssumedInformation);
5564
5565 if (!Success)
5566 if (!askSimplifiedValueForOtherAAs(A))
5567 return indicatePessimisticFixpoint();
5568
5569 // If a candidate was found in this update, return CHANGED.
5570 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5571 : ChangeStatus::CHANGED;
5572 }
5573
5574 /// See AbstractAttribute::trackStatistics()
5575 void trackStatistics() const override {
5576 STATS_DECLTRACK_ARG_ATTR(value_simplify)
5577 }
5578 };
5579
5580 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5581 AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5582 : AAValueSimplifyImpl(IRP, A) {}
5583
5584 /// See AAValueSimplify::getAssumedSimplifiedValue()
5585 Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5586 if (!isValidState())
5587 return nullptr;
5588 return SimplifiedAssociatedValue;
5589 }
5590
5591 /// See AbstractAttribute::updateImpl(...).
5592 ChangeStatus updateImpl(Attributor &A) override {
5593 auto Before = SimplifiedAssociatedValue;
5594
5595 auto ReturnInstCB = [&](Instruction &I) {
5596 auto &RI = cast<ReturnInst>(I);
5597 return checkAndUpdate(
5598 A, *this,
5599 IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5600 };
5601
5602 bool UsedAssumedInformation = false;
5603 if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5604 UsedAssumedInformation))
5605 if (!askSimplifiedValueForOtherAAs(A))
5606 return indicatePessimisticFixpoint();
5607
5608 // If a candidate was found in this update, return CHANGED.
5609 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5610 : ChangeStatus::CHANGED;
5611 }
5612
5613 ChangeStatus manifest(Attributor &A) override {
5614 // We queried AAValueSimplify for the returned values so they will be
5615 // replaced if a simplified form was found. Nothing to do here.
5616 return ChangeStatus::UNCHANGED;
5617 }
5618
5619 /// See AbstractAttribute::trackStatistics()
5620 void trackStatistics() const override {
5621 STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5622 }
5623 };
5624
5625 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5626 AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5627 : AAValueSimplifyImpl(IRP, A) {}
5628
5629 /// See AbstractAttribute::initialize(...).
5630 void initialize(Attributor &A) override {
5631 AAValueSimplifyImpl::initialize(A);
5632 Value &V = getAnchorValue();
5633
5634 // TODO: Add other cases as well.
5635 if (isa<Constant>(V))
5636 indicatePessimisticFixpoint();
5637 }
5638
5639 /// See AbstractAttribute::updateImpl(...).
5640 ChangeStatus updateImpl(Attributor &A) override {
5641 auto Before = SimplifiedAssociatedValue;
5642 if (!askSimplifiedValueForOtherAAs(A))
5643 return indicatePessimisticFixpoint();
5644
5645 // If a candidate was found in this update, return CHANGED.
5646 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5647 : ChangeStatus::CHANGED;
5648 }
5649
5650 /// See AbstractAttribute::trackStatistics()
5651 void trackStatistics() const override {
5652 STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5653 }
5654 };
5655
5656 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5657 AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5658 : AAValueSimplifyImpl(IRP, A) {}
5659
5660 /// See AbstractAttribute::initialize(...).
5661 void initialize(Attributor &A) override {
5662 SimplifiedAssociatedValue = nullptr;
5663 indicateOptimisticFixpoint();
5664 }
5665 /// See AbstractAttribute::updateImpl(...).
5666 ChangeStatus updateImpl(Attributor &A) override {
5667 llvm_unreachable(
5668 "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5669 }
5670 /// See AbstractAttribute::trackStatistics()
5671 void trackStatistics() const override {
5672 STATS_DECLTRACK_FN_ATTR(value_simplify)
5673 }
5674 };
5675
5676 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5677 AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5678 : AAValueSimplifyFunction(IRP, A) {}
5679 /// See AbstractAttribute::trackStatistics()
5680 void trackStatistics() const override {
5681 STATS_DECLTRACK_CS_ATTR(value_simplify)
5682 }
5683 };
5684
5685 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5686 AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5687 : AAValueSimplifyImpl(IRP, A) {}
5688
5689 void initialize(Attributor &A) override {
5690 AAValueSimplifyImpl::initialize(A);
5691 Function *Fn = getAssociatedFunction();
5692 if (!Fn) {
5693 indicatePessimisticFixpoint();
5694 return;
5695 }
5696 for (Argument &Arg : Fn->args()) {
5697 if (Arg.hasReturnedAttr()) {
5698 auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5699 Arg.getArgNo());
5700 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5701 checkAndUpdate(A, *this, IRP))
5702 indicateOptimisticFixpoint();
5703 else
5704 indicatePessimisticFixpoint();
5705 return;
5706 }
5707 }
5708 }
5709
5710 /// See AbstractAttribute::updateImpl(...).
5711 ChangeStatus updateImpl(Attributor &A) override {
5712 auto Before = SimplifiedAssociatedValue;
5713 auto &RetAA = A.getAAFor<AAReturnedValues>(
5714 *this, IRPosition::function(*getAssociatedFunction()),
5715 DepClassTy::REQUIRED);
5716 auto PredForReturned =
5717 [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5718 bool UsedAssumedInformation = false;
5719 Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5720 &RetVal, *cast<CallBase>(getCtxI()), *this,
5721 UsedAssumedInformation);
5722 SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5723 SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5724 return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5725 };
5726 if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5727 if (!askSimplifiedValueForOtherAAs(A))
5728 return indicatePessimisticFixpoint();
5729 return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5730 : ChangeStatus::CHANGED;
5731 }
5732
5733 void trackStatistics() const override {
5734 STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5735 }
5736 };
5737
5738 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5739 AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5740 : AAValueSimplifyFloating(IRP, A) {}
5741
5742 /// See AbstractAttribute::manifest(...).
5743 ChangeStatus manifest(Attributor &A) override { 5744 ChangeStatus Changed = ChangeStatus::UNCHANGED; 5745 // TODO: We should avoid simplification duplication to begin with. 5746 auto *FloatAA = A.lookupAAFor<AAValueSimplify>( 5747 IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE); 5748 if (FloatAA && FloatAA->getState().isValidState()) 5749 return Changed; 5750 5751 if (auto *NewV = manifestReplacementValue(A, getCtxI())) { 5752 Use &U = cast<CallBase>(&getAnchorValue()) 5753 ->getArgOperandUse(getCallSiteArgNo()); 5754 if (A.changeUseAfterManifest(U, *NewV)) 5755 Changed = ChangeStatus::CHANGED; 5756 } 5757 5758 return Changed | AAValueSimplify::manifest(A); 5759 } 5760 5761 void trackStatistics() const override { 5762 STATS_DECLTRACK_CSARG_ATTR(value_simplify) 5763 } 5764 }; 5765 } // namespace 5766 5767 /// ----------------------- Heap-To-Stack Conversion --------------------------- 5768 namespace { 5769 struct AAHeapToStackFunction final : public AAHeapToStack { 5770 5771 struct AllocationInfo { 5772 /// The call that allocates the memory. 5773 CallBase *const CB; 5774 5775 /// The library function id for the allocation. 5776 LibFunc LibraryFunctionId = NotLibFunc; 5777 5778 /// The status wrt. a rewrite. 5779 enum { 5780 STACK_DUE_TO_USE, 5781 STACK_DUE_TO_FREE, 5782 INVALID, 5783 } Status = STACK_DUE_TO_USE; 5784 5785 /// Flag to indicate if we encountered a use that might free this allocation 5786 /// but which is not in the deallocation infos. 5787 bool HasPotentiallyFreeingUnknownUses = false; 5788 5789 /// Flag to indicate that we should place the new alloca in the function 5790 /// entry block rather than where the call site (CB) is. 5791 bool MoveAllocaIntoEntry = true; 5792 5793 /// The set of free calls that use this allocation. 5794 SmallSetVector<CallBase *, 1> PotentialFreeCalls{}; 5795 }; 5796 5797 struct DeallocationInfo { 5798 /// The call that deallocates the memory. 5799 CallBase *const CB; 5800 5801 /// Flag to indicate if we don't know all objects this deallocation might 5802 /// free. 5803 bool MightFreeUnknownObjects = false; 5804 5805 /// The set of allocation calls that are potentially freed. 5806 SmallSetVector<CallBase *, 1> PotentialAllocationCalls{}; 5807 }; 5808 5809 AAHeapToStackFunction(const IRPosition &IRP, Attributor &A) 5810 : AAHeapToStack(IRP, A) {} 5811 5812 ~AAHeapToStackFunction() { 5813 // Ensure we call the destructor so we release any memory allocated in the 5814 // sets. 5815 for (auto &It : AllocationInfos) 5816 It.second->~AllocationInfo(); 5817 for (auto &It : DeallocationInfos) 5818 It.second->~DeallocationInfo(); 5819 } 5820 5821 void initialize(Attributor &A) override { 5822 AAHeapToStack::initialize(A); 5823 5824 const Function *F = getAnchorScope(); 5825 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5826 5827 auto AllocationIdentifierCB = [&](Instruction &I) { 5828 CallBase *CB = dyn_cast<CallBase>(&I); 5829 if (!CB) 5830 return true; 5831 if (isFreeCall(CB, TLI)) { 5832 DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB}; 5833 return true; 5834 } 5835 // To do heap to stack, we need to know that the allocation itself is 5836 // removable once uses are rewritten, and that we can initialize the 5837 // alloca to the same pattern as the original allocation result. 
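// The overall rewrite this enables, on an illustrative (hypothetical)
// example:
//   %p = call i8* @malloc(i64 16) ... call void @free(i8* %p)
// becomes
//   %p = alloca i8, i64 16
// with the free call deleted during manifest.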
5838 if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) { 5839 auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext()); 5840 if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) { 5841 AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB}; 5842 AllocationInfos[CB] = AI; 5843 if (TLI) 5844 TLI->getLibFunc(*CB, AI->LibraryFunctionId); 5845 } 5846 } 5847 return true; 5848 }; 5849 5850 bool UsedAssumedInformation = false; 5851 bool Success = A.checkForAllCallLikeInstructions( 5852 AllocationIdentifierCB, *this, UsedAssumedInformation, 5853 /* CheckBBLivenessOnly */ false, 5854 /* CheckPotentiallyDead */ true); 5855 (void)Success; 5856 assert(Success && "Did not expect the call base visit callback to fail!"); 5857 5858 Attributor::SimplifictionCallbackTy SCB = 5859 [](const IRPosition &, const AbstractAttribute *, 5860 bool &) -> Optional<Value *> { return nullptr; }; 5861 for (const auto &It : AllocationInfos) 5862 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 5863 SCB); 5864 for (const auto &It : DeallocationInfos) 5865 A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first), 5866 SCB); 5867 } 5868 5869 const std::string getAsStr() const override { 5870 unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0; 5871 for (const auto &It : AllocationInfos) { 5872 if (It.second->Status == AllocationInfo::INVALID) 5873 ++NumInvalidMallocs; 5874 else 5875 ++NumH2SMallocs; 5876 } 5877 return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" + 5878 std::to_string(NumInvalidMallocs); 5879 } 5880 5881 /// See AbstractAttribute::trackStatistics(). 5882 void trackStatistics() const override { 5883 STATS_DECL( 5884 MallocCalls, Function, 5885 "Number of malloc/calloc/aligned_alloc calls converted to allocas"); 5886 for (auto &It : AllocationInfos) 5887 if (It.second->Status != AllocationInfo::INVALID) 5888 ++BUILD_STAT_NAME(MallocCalls, Function); 5889 } 5890 5891 bool isAssumedHeapToStack(const CallBase &CB) const override { 5892 if (isValidState()) 5893 if (AllocationInfo *AI = 5894 AllocationInfos.lookup(const_cast<CallBase *>(&CB))) 5895 return AI->Status != AllocationInfo::INVALID; 5896 return false; 5897 } 5898 5899 bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override { 5900 if (!isValidState()) 5901 return false; 5902 5903 for (auto &It : AllocationInfos) { 5904 AllocationInfo &AI = *It.second; 5905 if (AI.Status == AllocationInfo::INVALID) 5906 continue; 5907 5908 if (AI.PotentialFreeCalls.count(&CB)) 5909 return true; 5910 } 5911 5912 return false; 5913 } 5914 5915 ChangeStatus manifest(Attributor &A) override { 5916 assert(getState().isValidState() && 5917 "Attempted to manifest an invalid state!"); 5918 5919 ChangeStatus HasChanged = ChangeStatus::UNCHANGED; 5920 Function *F = getAnchorScope(); 5921 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 5922 5923 for (auto &It : AllocationInfos) { 5924 AllocationInfo &AI = *It.second; 5925 if (AI.Status == AllocationInfo::INVALID) 5926 continue; 5927 5928 for (CallBase *FreeCall : AI.PotentialFreeCalls) { 5929 LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n"); 5930 A.deleteAfterManifest(*FreeCall); 5931 HasChanged = ChangeStatus::CHANGED; 5932 } 5933 5934 LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB 5935 << "\n"); 5936 5937 auto Remark = [&](OptimizationRemark OR) { 5938 LibFunc IsAllocShared; 5939 if (TLI->getLibFunc(*AI.CB, IsAllocShared)) 5940 if (IsAllocShared == LibFunc___kmpc_alloc_shared) 
5941 return OR << "Moving globalized variable to the stack."; 5942 return OR << "Moving memory allocation from the heap to the stack."; 5943 }; 5944 if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared) 5945 A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark); 5946 else 5947 A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark); 5948 5949 const DataLayout &DL = A.getInfoCache().getDL(); 5950 Value *Size; 5951 Optional<APInt> SizeAPI = getSize(A, *this, AI); 5952 if (SizeAPI) { 5953 Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI); 5954 } else { 5955 LLVMContext &Ctx = AI.CB->getContext(); 5956 ObjectSizeOpts Opts; 5957 ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts); 5958 SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB); 5959 assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() && 5960 cast<ConstantInt>(SizeOffsetPair.second)->isZero()); 5961 Size = SizeOffsetPair.first; 5962 } 5963 5964 Instruction *IP = 5965 AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB; 5966 5967 Align Alignment(1); 5968 if (MaybeAlign RetAlign = AI.CB->getRetAlign()) 5969 Alignment = std::max(Alignment, *RetAlign); 5970 if (Value *Align = getAllocAlignment(AI.CB, TLI)) { 5971 Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align); 5972 assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 && 5973 "Expected an alignment during manifest!"); 5974 Alignment = std::max( 5975 Alignment, assumeAligned(AlignmentAPI.value().getZExtValue())); 5976 } 5977 5978 // TODO: Hoist the alloca towards the function entry. 5979 unsigned AS = DL.getAllocaAddrSpace(); 5980 Instruction *Alloca = 5981 new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment, 5982 AI.CB->getName() + ".h2s", IP); 5983 5984 if (Alloca->getType() != AI.CB->getType()) 5985 Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 5986 Alloca, AI.CB->getType(), "malloc_cast", AI.CB); 5987 5988 auto *I8Ty = Type::getInt8Ty(F->getContext()); 5989 auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty); 5990 assert(InitVal && 5991 "Must be able to materialize initial memory state of allocation"); 5992 5993 A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca); 5994 5995 if (auto *II = dyn_cast<InvokeInst>(AI.CB)) { 5996 auto *NBB = II->getNormalDest(); 5997 BranchInst::Create(NBB, AI.CB->getParent()); 5998 A.deleteAfterManifest(*AI.CB); 5999 } else { 6000 A.deleteAfterManifest(*AI.CB); 6001 } 6002 6003 // Initialize the alloca with the same value as used by the allocation 6004 // function. We can skip undef as the initial value of an alloc is 6005 // undef, and the memset would simply end up being DSEd. 
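// E.g. (illustrative), for a calloc-like allocation the initial value is the
// zero byte, so the rewritten form is roughly
//   %a = alloca i8, i64 %size
//   call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 %size, i1 false)
// whereas malloc yields undef memory and needs no memset.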
6006 if (!isa<UndefValue>(InitVal)) { 6007 IRBuilder<> Builder(Alloca->getNextNode()); 6008 // TODO: Use alignment above if align!=1 6009 Builder.CreateMemSet(Alloca, InitVal, Size, None); 6010 } 6011 HasChanged = ChangeStatus::CHANGED; 6012 } 6013 6014 return HasChanged; 6015 } 6016 6017 Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA, 6018 Value &V) { 6019 bool UsedAssumedInformation = false; 6020 Optional<Constant *> SimpleV = 6021 A.getAssumedConstant(V, AA, UsedAssumedInformation); 6022 if (!SimpleV) 6023 return APInt(64, 0); 6024 if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value())) 6025 return CI->getValue(); 6026 return llvm::None; 6027 } 6028 6029 Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA, 6030 AllocationInfo &AI) { 6031 auto Mapper = [&](const Value *V) -> const Value * { 6032 bool UsedAssumedInformation = false; 6033 if (Optional<Constant *> SimpleV = 6034 A.getAssumedConstant(*V, AA, UsedAssumedInformation)) 6035 if (*SimpleV) 6036 return *SimpleV; 6037 return V; 6038 }; 6039 6040 const Function *F = getAnchorScope(); 6041 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6042 return getAllocSize(AI.CB, TLI, Mapper); 6043 } 6044 6045 /// Collection of all malloc-like calls in a function with associated 6046 /// information. 6047 MapVector<CallBase *, AllocationInfo *> AllocationInfos; 6048 6049 /// Collection of all free-like calls in a function with associated 6050 /// information. 6051 MapVector<CallBase *, DeallocationInfo *> DeallocationInfos; 6052 6053 ChangeStatus updateImpl(Attributor &A) override; 6054 }; 6055 6056 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) { 6057 ChangeStatus Changed = ChangeStatus::UNCHANGED; 6058 const Function *F = getAnchorScope(); 6059 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 6060 6061 const auto &LivenessAA = 6062 A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE); 6063 6064 MustBeExecutedContextExplorer &Explorer = 6065 A.getInfoCache().getMustBeExecutedContextExplorer(); 6066 6067 bool StackIsAccessibleByOtherThreads = 6068 A.getInfoCache().stackIsAccessibleByOtherThreads(); 6069 6070 LoopInfo *LI = 6071 A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F); 6072 Optional<bool> MayContainIrreducibleControl; 6073 auto IsInLoop = [&](BasicBlock &BB) { 6074 if (&F->getEntryBlock() == &BB) 6075 return false; 6076 if (!MayContainIrreducibleControl.has_value()) 6077 MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI); 6078 if (MayContainIrreducibleControl.value()) 6079 return true; 6080 if (!LI) 6081 return true; 6082 return LI->getLoopFor(&BB) != nullptr; 6083 }; 6084 6085 // Flag to ensure we update our deallocation information at most once per 6086 // updateImpl call and only if we use the free check reasoning. 6087 bool HasUpdatedFrees = false; 6088 6089 auto UpdateFrees = [&]() { 6090 HasUpdatedFrees = true; 6091 6092 for (auto &It : DeallocationInfos) { 6093 DeallocationInfo &DI = *It.second; 6094 // For now we cannot use deallocations that have unknown inputs, skip 6095 // them. 6096 if (DI.MightFreeUnknownObjects) 6097 continue; 6098 6099 // No need to analyze dead calls, ignore them instead. 6100 bool UsedAssumedInformation = false; 6101 if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation, 6102 /* CheckBBLivenessOnly */ true)) 6103 continue; 6104 6105 // Use the non-optimistic version to get the freed object. 
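// E.g. (illustrative), if the freed operand is
//   %g = getelementptr i8, i8* %m, i64 0
// the underlying object is the malloc-like call defining %m;
// getUnderlyingObject strips such GEPs and casts for us.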
6106 Value *Obj = getUnderlyingObject(DI.CB->getArgOperand(0));
6107 if (!Obj) {
6108 LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6109 DI.MightFreeUnknownObjects = true;
6110 continue;
6111 }
6112
6113 // Free of null and undef can be ignored as no-ops (or UB in the latter
6114 // case).
6115 if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6116 continue;
6117
6118 CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6119 if (!ObjCB) {
6120 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6121 << "\n");
6122 DI.MightFreeUnknownObjects = true;
6123 continue;
6124 }
6125
6126 AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6127 if (!AI) {
6128 LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6129 << "\n");
6130 DI.MightFreeUnknownObjects = true;
6131 continue;
6132 }
6133
6134 DI.PotentialAllocationCalls.insert(ObjCB);
6135 }
6136 };
6137
6138 auto FreeCheck = [&](AllocationInfo &AI) {
6139 // If the stack is not accessible by other threads, the "must-free" logic
6140 // doesn't apply as the pointer could be shared and needs to be placed in
6141 // "shareable" memory.
6142 if (!StackIsAccessibleByOtherThreads) {
6143 auto &NoSyncAA =
6144 A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6145 if (!NoSyncAA.isAssumedNoSync()) {
6146 LLVM_DEBUG(
6147 dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6148 "other threads and function is not nosync:\n");
6149 return false;
6150 }
6151 }
6152 if (!HasUpdatedFrees)
6153 UpdateFrees();
6154
6155 // TODO: Allow multi-exit functions that have different free calls.
6156 if (AI.PotentialFreeCalls.size() != 1) {
6157 LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6158 << AI.PotentialFreeCalls.size() << "\n");
6159 return false;
6160 }
6161 CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6162 DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6163 if (!DI) {
6164 LLVM_DEBUG(
6165 dbgs() << "[H2S] unique free call was not known as deallocation call "
6166 << *UniqueFree << "\n");
6167 return false;
6168 }
6169 if (DI->MightFreeUnknownObjects) {
6170 LLVM_DEBUG(
6171 dbgs() << "[H2S] unique free call might free unknown allocations\n");
6172 return false;
6173 }
6174 if (DI->PotentialAllocationCalls.empty())
6175 return true;
6176 if (DI->PotentialAllocationCalls.size() > 1) {
6177 LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6178 << DI->PotentialAllocationCalls.size()
6179 << " different allocations\n");
6180 return false;
6181 }
6182 if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6183 LLVM_DEBUG(
6184 dbgs()
6185 << "[H2S] unique free call not known to free this allocation but "
6186 << **DI->PotentialAllocationCalls.begin() << "\n");
6187 return false;
6188 }
6189 Instruction *CtxI = isa<InvokeInst>(AI.CB) ?
AI.CB : AI.CB->getNextNode();
6190 if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6191 LLVM_DEBUG(
6192 dbgs()
6193 << "[H2S] unique free call might not be executed with the allocation "
6194 << *UniqueFree << "\n");
6195 return false;
6196 }
6197 return true;
6198 };
6199
6200 auto UsesCheck = [&](AllocationInfo &AI) {
6201 bool ValidUsesOnly = true;
6202
6203 auto Pred = [&](const Use &U, bool &Follow) -> bool {
6204 Instruction *UserI = cast<Instruction>(U.getUser());
6205 if (isa<LoadInst>(UserI))
6206 return true;
6207 if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6208 if (SI->getValueOperand() == U.get()) {
6209 LLVM_DEBUG(dbgs()
6210 << "[H2S] escaping store to memory: " << *UserI << "\n");
6211 ValidUsesOnly = false;
6212 } else {
6213 // A store into the malloc'ed memory is fine.
6214 }
6215 return true;
6216 }
6217 if (auto *CB = dyn_cast<CallBase>(UserI)) {
6218 if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6219 return true;
6220 if (DeallocationInfos.count(CB)) {
6221 AI.PotentialFreeCalls.insert(CB);
6222 return true;
6223 }
6224
6225 unsigned ArgNo = CB->getArgOperandNo(&U);
6226
6227 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6228 *this, IRPosition::callsite_argument(*CB, ArgNo),
6229 DepClassTy::OPTIONAL);
6230
6231 // If a call site argument use is nofree, we are fine.
6232 const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6233 *this, IRPosition::callsite_argument(*CB, ArgNo),
6234 DepClassTy::OPTIONAL);
6235
6236 bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6237 bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6238 if (MaybeCaptured ||
6239 (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6240 MaybeFreed)) {
6241 AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6242
6243 // Emit a missed remark if this is a missed OpenMP globalization.
6244 auto Remark = [&](OptimizationRemarkMissed ORM) {
6245 return ORM
6246 << "Could not move globalized variable to the stack. "
6247 "Variable is potentially captured in call. Mark "
6248 "parameter as `__attribute__((noescape))` to override.";
6249 };
6250
6251 if (ValidUsesOnly &&
6252 AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6253 A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6254
6255 LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6256 ValidUsesOnly = false;
6257 }
6258 return true;
6259 }
6260
6261 if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6262 isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6263 Follow = true;
6264 return true;
6265 }
6266 // Unknown user for which we cannot track uses further (in a way that
6267 // makes sense).
6268 LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6269 ValidUsesOnly = false;
6270 return true;
6271 };
6272 if (!A.checkForAllUses(Pred, *this, *AI.CB))
6273 return false;
6274 return ValidUsesOnly;
6275 };
6276
6277 // The actual update starts here. We look at all allocations and depending on
6278 // their status perform the appropriate check(s).
6279 for (auto &It : AllocationInfos) {
6280 AllocationInfo &AI = *It.second;
6281 if (AI.Status == AllocationInfo::INVALID)
6282 continue;
6283
6284 if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6285 Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6286 if (!APAlign) {
6287 // Can't generate an alloca which respects the required alignment
6288 // on the allocation.
6289 LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6290 << "\n");
6291 AI.Status = AllocationInfo::INVALID;
6292 Changed = ChangeStatus::CHANGED;
6293 continue;
6294 }
6295 if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6296 !APAlign->isPowerOf2()) {
6297 LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6298 << "\n");
6299 AI.Status = AllocationInfo::INVALID;
6300 Changed = ChangeStatus::CHANGED;
6301 continue;
6302 }
6303 }
6304
6305 Optional<APInt> Size = getSize(A, *this, AI);
6306 if (MaxHeapToStackSize != -1) {
6307 if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
6308 LLVM_DEBUG({
6309 if (!Size)
6310 dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6311 else
6312 dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6313 << MaxHeapToStackSize << "\n";
6314 });
6315
6316 AI.Status = AllocationInfo::INVALID;
6317 Changed = ChangeStatus::CHANGED;
6318 continue;
6319 }
6320 }
6321
6322 switch (AI.Status) {
6323 case AllocationInfo::STACK_DUE_TO_USE:
6324 if (UsesCheck(AI))
6325 break;
6326 AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6327 LLVM_FALLTHROUGH;
6328 case AllocationInfo::STACK_DUE_TO_FREE:
6329 if (FreeCheck(AI))
6330 break;
6331 AI.Status = AllocationInfo::INVALID;
6332 Changed = ChangeStatus::CHANGED;
6333 break;
6334 case AllocationInfo::INVALID:
6335 llvm_unreachable("Invalid allocations should never reach this point!");
6336 };
6337
6338 // Check if we still think we can move it into the entry block.
6339 if (AI.MoveAllocaIntoEntry &&
6340 (!Size.has_value() || IsInLoop(*AI.CB->getParent())))
6341 AI.MoveAllocaIntoEntry = false;
6342 }
6343
6344 return Changed;
6345 }
6346 } // namespace
6347
6348 /// ----------------------- Privatizable Pointers ------------------------------
6349 namespace {
6350 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6351 AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6352 : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6353
6354 ChangeStatus indicatePessimisticFixpoint() override {
6355 AAPrivatizablePtr::indicatePessimisticFixpoint();
6356 PrivatizableType = nullptr;
6357 return ChangeStatus::CHANGED;
6358 }
6359
6360 /// Identify the type we can choose for a private copy of the underlying
6361 /// argument. None means it is not clear yet, nullptr means there is none.
6362 virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6363
6364 /// Return a privatizable type that encloses both T0 and T1.
6365 /// TODO: This is merely a stub for now as we should manage a mapping as well.
6366 Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6367 if (!T0)
6368 return T1;
6369 if (!T1)
6370 return T0;
6371 if (T0 == T1)
6372 return T0;
6373 return nullptr;
6374 }
6375
6376 Optional<Type *> getPrivatizableType() const override {
6377 return PrivatizableType;
6378 }
6379
6380 const std::string getAsStr() const override {
6381 return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6382 }
6383
6384 protected:
6385 Optional<Type *> PrivatizableType;
6386 };
6387
6388 // TODO: Do this for call site arguments (probably also other values) as well.
6389
6390 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6391 AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6392 : AAPrivatizablePtrImpl(IRP, A) {}
6393
6394 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
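///
/// E.g. (illustrative), for `define void @f(i32* byval(i32) %p)` the byval
/// attribute already carries the type and i32 is returned directly;
/// otherwise all call sites have to agree, e.g., each one passing its own
/// `alloca i32`.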
6395 Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6396 // If this is a byval argument and we know all the call sites (so we can
6397 // rewrite them), there is no need to check them explicitly.
6398 bool UsedAssumedInformation = false;
6399 SmallVector<Attribute, 1> Attrs;
6400 getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6401 if (!Attrs.empty() &&
6402 A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6403 true, UsedAssumedInformation))
6404 return Attrs[0].getValueAsType();
6405
6406 Optional<Type *> Ty;
6407 unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6408
6409 // Make sure the associated call site argument has the same type at all call
6410 // sites and it is an allocation we know is safe to privatize, for now that
6411 // means we only allow alloca instructions.
6412 // TODO: We can additionally analyze the accesses in the callee to create
6413 // the type from that information instead. That is a little more
6414 // involved and will be done in a follow up patch.
6415 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6416 IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6417 // Check if a corresponding argument was found or if it is one not
6418 // associated (which can happen for callback calls).
6419 if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6420 return false;
6421
6422 // Check that all call sites agree on a type.
6423 auto &PrivCSArgAA =
6424 A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6425 Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6426
6427 LLVM_DEBUG({
6428 dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6429 if (CSTy && CSTy.value())
6430 CSTy.value()->print(dbgs());
6431 else if (CSTy)
6432 dbgs() << "<nullptr>";
6433 else
6434 dbgs() << "<none>";
6435 });
6436
6437 Ty = combineTypes(Ty, CSTy);
6438
6439 LLVM_DEBUG({
6440 dbgs() << " : New Type: ";
6441 if (Ty && Ty.value())
6442 Ty.value()->print(dbgs());
6443 else if (Ty)
6444 dbgs() << "<nullptr>";
6445 else
6446 dbgs() << "<none>";
6447 dbgs() << "\n";
6448 });
6449
6450 return !Ty || Ty.value();
6451 };
6452
6453 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6454 UsedAssumedInformation))
6455 return nullptr;
6456 return Ty;
6457 }
6458
6459 /// See AbstractAttribute::updateImpl(...).
6460 ChangeStatus updateImpl(Attributor &A) override {
6461 PrivatizableType = identifyPrivatizableType(A);
6462 if (!PrivatizableType)
6463 return ChangeStatus::UNCHANGED;
6464 if (!PrivatizableType.value())
6465 return indicatePessimisticFixpoint();
6466
6467 // The dependence is optional so we don't give up once we give up on the
6468 // alignment.
6469 A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6470 DepClassTy::OPTIONAL);
6471
6472 // Avoid arguments with padding for now.
6473 if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6474 !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6475 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6476 return indicatePessimisticFixpoint();
6477 }
6478
6479 // Collect the types that will replace the privatizable type in the function
6480 // signature.
6481 SmallVector<Type *, 16> ReplacementTypes;
6482 identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6483
6484 // Verify callee and caller agree on how the promoted argument would be
6485 // passed.
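// For illustration (a sketch, not tied to a specific target): expanding a
// byval struct into scalar arguments is only sound if the target ABI passes
// those scalars identically for every caller/callee pair; this is what the
// TTI->areTypesABICompatible query below conservatively checks.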
6486 Function &Fn = *getIRPosition().getAnchorScope();
6487 const auto *TTI =
6488 A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6489 if (!TTI) {
6490 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6491 << Fn.getName() << "\n");
6492 return indicatePessimisticFixpoint();
6493 }
6494
6495 auto CallSiteCheck = [&](AbstractCallSite ACS) {
6496 CallBase *CB = ACS.getInstruction();
6497 return TTI->areTypesABICompatible(
6498 CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6499 };
6500 bool UsedAssumedInformation = false;
6501 if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6502 UsedAssumedInformation)) {
6503 LLVM_DEBUG(
6504 dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6505 << Fn.getName() << "\n");
6506 return indicatePessimisticFixpoint();
6507 }
6508
6509 // Register a rewrite of the argument.
6510 Argument *Arg = getAssociatedArgument();
6511 if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6512 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6513 return indicatePessimisticFixpoint();
6514 }
6515
6516 unsigned ArgNo = Arg->getArgNo();
6517
6518 // Helper to check if for the given call site the associated argument is
6519 // passed to a callback where the privatization would be different.
6520 auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6521 SmallVector<const Use *, 4> CallbackUses;
6522 AbstractCallSite::getCallbackUses(CB, CallbackUses);
6523 for (const Use *U : CallbackUses) {
6524 AbstractCallSite CBACS(U);
6525 assert(CBACS && CBACS.isCallbackCall());
6526 for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6527 int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6528
6529 LLVM_DEBUG({
6530 dbgs()
6531 << "[AAPrivatizablePtr] Argument " << *Arg
6532 << " check if it can be privatized in the context of its parent ("
6533 << Arg->getParent()->getName()
6534 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6535 "callback ("
6536 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6537 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6538 << CBACS.getCallArgOperand(CBArg) << " vs "
6539 << CB.getArgOperand(ArgNo) << "\n"
6540 << "[AAPrivatizablePtr] " << CBArg << " : "
6541 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6542 });
6543
6544 if (CBArgNo != int(ArgNo))
6545 continue;
6546 const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6547 *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6548 if (CBArgPrivAA.isValidState()) {
6549 auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6550 if (!CBArgPrivTy)
6551 continue;
6552 if (CBArgPrivTy.value() == PrivatizableType)
6553 continue;
6554 }
6555
6556 LLVM_DEBUG({
6557 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6558 << " cannot be privatized in the context of its parent ("
6559 << Arg->getParent()->getName()
6560 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6561 "callback ("
6562 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6563 << ").\n[AAPrivatizablePtr] for which the argument "
6564 "privatization is not compatible.\n";
6565 });
6566 return false;
6567 }
6568 }
6569 return true;
6570 };
6571
6572 // Helper to check if for the given call site the associated argument is
6573 // passed to a direct call where the privatization would be different.
6574 auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6575 CallBase *DC = cast<CallBase>(ACS.getInstruction());
6576 int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6577 assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6578 "Expected a direct call operand for callback call operand");
6579
6580 LLVM_DEBUG({
6581 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6582 << " check if it can be privatized in the context of its parent ("
6583 << Arg->getParent()->getName()
6584 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6585 "direct call of ("
6586 << DCArgNo << "@" << DC->getCalledFunction()->getName()
6587 << ").\n";
6588 });
6589
6590 Function *DCCallee = DC->getCalledFunction();
6591 if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6592 const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6593 *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6594 DepClassTy::REQUIRED);
6595 if (DCArgPrivAA.isValidState()) {
6596 auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6597 if (!DCArgPrivTy)
6598 return true;
6599 if (DCArgPrivTy.value() == PrivatizableType)
6600 return true;
6601 }
6602 }
6603
6604 LLVM_DEBUG({
6605 dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6606 << " cannot be privatized in the context of its parent ("
6607 << Arg->getParent()->getName()
6608 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6609 "direct call of ("
6610 << ACS.getInstruction()->getCalledFunction()->getName()
6611 << ").\n[AAPrivatizablePtr] for which the argument "
6612 "privatization is not compatible.\n";
6613 });
6614 return false;
6615 };
6616
6617 // Helper to check if the associated argument is used at the given abstract
6618 // call site in a way that is incompatible with the privatization assumed
6619 // here.
6620 auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6621 if (ACS.isDirectCall())
6622 return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6623 if (ACS.isCallbackCall())
6624 return IsCompatiblePrivArgOfDirectCS(ACS);
6625 return false;
6626 };
6627
6628 if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6629 UsedAssumedInformation))
6630 return indicatePessimisticFixpoint();
6631
6632 return ChangeStatus::UNCHANGED;
6633 }
6634
6635 /// Given a type to privatize, \p PrivType, collect the constituent types
6636 /// (which are used) in \p ReplacementTypes.
6637 static void
6638 identifyReplacementTypes(Type *PrivType,
6639 SmallVectorImpl<Type *> &ReplacementTypes) {
6640 // TODO: For now we expand the privatization type to the fullest which can
6641 // lead to dead arguments that need to be removed later.
6642 assert(PrivType && "Expected privatizable type!");
6643
6644 // Traverse the type, extract constituent types on the outermost level.
6645 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6646 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6647 ReplacementTypes.push_back(PrivStructType->getElementType(u));
6648 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6649 ReplacementTypes.append(PrivArrayType->getNumElements(),
6650 PrivArrayType->getElementType());
6651 } else {
6652 ReplacementTypes.push_back(PrivType);
6653 }
6654 }
6655
6656 /// Initialize \p Base according to the type \p PrivType at position \p IP.
6657 /// The values needed are taken from the arguments of \p F starting at
6658 /// position \p ArgNo.
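///
/// A minimal sketch of the effect, assuming a hypothetical
///   %struct.S = type { i32, i32 }
/// privatized starting at \p ArgNo: a pointer to each element of \p Base is
/// built and the corresponding incoming argument (F.getArg(ArgNo + u)) is
/// stored through it.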
6659 static void createInitialization(Type *PrivType, Value &Base, Function &F, 6660 unsigned ArgNo, Instruction &IP) { 6661 assert(PrivType && "Expected privatizable type!"); 6662 6663 IRBuilder<NoFolder> IRB(&IP); 6664 const DataLayout &DL = F.getParent()->getDataLayout(); 6665 6666 // Traverse the type, build GEPs and stores. 6667 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6668 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6669 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6670 Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo(); 6671 Value *Ptr = 6672 constructPointer(PointeeTy, PrivType, &Base, 6673 PrivStructLayout->getElementOffset(u), IRB, DL); 6674 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6675 } 6676 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6677 Type *PointeeTy = PrivArrayType->getElementType(); 6678 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6679 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6680 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6681 Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base, 6682 u * PointeeTySize, IRB, DL); 6683 new StoreInst(F.getArg(ArgNo + u), Ptr, &IP); 6684 } 6685 } else { 6686 new StoreInst(F.getArg(ArgNo), &Base, &IP); 6687 } 6688 } 6689 6690 /// Extract values from \p Base according to the type \p PrivType at the 6691 /// call position \p ACS. The values are appended to \p ReplacementValues. 6692 void createReplacementValues(Align Alignment, Type *PrivType, 6693 AbstractCallSite ACS, Value *Base, 6694 SmallVectorImpl<Value *> &ReplacementValues) { 6695 assert(Base && "Expected base value!"); 6696 assert(PrivType && "Expected privatizable type!"); 6697 Instruction *IP = ACS.getInstruction(); 6698 6699 IRBuilder<NoFolder> IRB(IP); 6700 const DataLayout &DL = IP->getModule()->getDataLayout(); 6701 6702 Type *PrivPtrType = PrivType->getPointerTo(); 6703 if (Base->getType() != PrivPtrType) 6704 Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6705 Base, PrivPtrType, "", ACS.getInstruction()); 6706 6707 // Traverse the type, build GEPs and loads. 6708 if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) { 6709 const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType); 6710 for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) { 6711 Type *PointeeTy = PrivStructType->getElementType(u); 6712 Value *Ptr = 6713 constructPointer(PointeeTy->getPointerTo(), PrivType, Base, 6714 PrivStructLayout->getElementOffset(u), IRB, DL); 6715 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6716 L->setAlignment(Alignment); 6717 ReplacementValues.push_back(L); 6718 } 6719 } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) { 6720 Type *PointeeTy = PrivArrayType->getElementType(); 6721 uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy); 6722 Type *PointeePtrTy = PointeeTy->getPointerTo(); 6723 for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) { 6724 Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base, 6725 u * PointeeTySize, IRB, DL); 6726 LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP); 6727 L->setAlignment(Alignment); 6728 ReplacementValues.push_back(L); 6729 } 6730 } else { 6731 LoadInst *L = new LoadInst(PrivType, Base, "", IP); 6732 L->setAlignment(Alignment); 6733 ReplacementValues.push_back(L); 6734 } 6735 } 6736 6737 /// See AbstractAttribute::manifest(...) 
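///
/// Roughly, as a hedged summary rather than a contract: the callee is
/// rewritten to take the expanded scalar arguments and rebuild the
/// aggregate in a fresh entry-block alloca, while every call site loads
/// the elements from the original pointer and passes them individually.
/// Tail calls are demoted because the new alloca must not escape into
/// tail recursion.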
6738 ChangeStatus manifest(Attributor &A) override { 6739 if (!PrivatizableType) 6740 return ChangeStatus::UNCHANGED; 6741 assert(PrivatizableType.value() && "Expected privatizable type!"); 6742 6743 // Collect all tail calls in the function as we cannot allow new allocas to 6744 // escape into tail recursion. 6745 // TODO: Be smarter about new allocas escaping into tail calls. 6746 SmallVector<CallInst *, 16> TailCalls; 6747 bool UsedAssumedInformation = false; 6748 if (!A.checkForAllInstructions( 6749 [&](Instruction &I) { 6750 CallInst &CI = cast<CallInst>(I); 6751 if (CI.isTailCall()) 6752 TailCalls.push_back(&CI); 6753 return true; 6754 }, 6755 *this, {Instruction::Call}, UsedAssumedInformation)) 6756 return ChangeStatus::UNCHANGED; 6757 6758 Argument *Arg = getAssociatedArgument(); 6759 // Query AAAlign attribute for alignment of associated argument to 6760 // determine the best alignment of loads. 6761 const auto &AlignAA = 6762 A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE); 6763 6764 // Callback to repair the associated function. A new alloca is placed at the 6765 // beginning and initialized with the values passed through arguments. The 6766 // new alloca replaces the use of the old pointer argument. 6767 Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB = 6768 [=](const Attributor::ArgumentReplacementInfo &ARI, 6769 Function &ReplacementFn, Function::arg_iterator ArgIt) { 6770 BasicBlock &EntryBB = ReplacementFn.getEntryBlock(); 6771 Instruction *IP = &*EntryBB.getFirstInsertionPt(); 6772 const DataLayout &DL = IP->getModule()->getDataLayout(); 6773 unsigned AS = DL.getAllocaAddrSpace(); 6774 Instruction *AI = new AllocaInst(PrivatizableType.value(), AS, 6775 Arg->getName() + ".priv", IP); 6776 createInitialization(PrivatizableType.value(), *AI, ReplacementFn, 6777 ArgIt->getArgNo(), *IP); 6778 6779 if (AI->getType() != Arg->getType()) 6780 AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast( 6781 AI, Arg->getType(), "", IP); 6782 Arg->replaceAllUsesWith(AI); 6783 6784 for (CallInst *CI : TailCalls) 6785 CI->setTailCall(false); 6786 }; 6787 6788 // Callback to repair a call site of the associated function. The elements 6789 // of the privatizable type are loaded prior to the call and passed to the 6790 // new function version. 6791 Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB = 6792 [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI, 6793 AbstractCallSite ACS, 6794 SmallVectorImpl<Value *> &NewArgOperands) { 6795 // When no alignment is specified for the load instruction, 6796 // natural alignment is assumed. 6797 createReplacementValues( 6798 AlignAA.getAssumedAlign(), *PrivatizableType, ACS, 6799 ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()), 6800 NewArgOperands); 6801 }; 6802 6803 // Collect the types that will replace the privatizable type in the function 6804 // signature. 6805 SmallVector<Type *, 16> ReplacementTypes; 6806 identifyReplacementTypes(*PrivatizableType, ReplacementTypes); 6807 6808 // Register a rewrite of the argument. 
6809 if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes, 6810 std::move(FnRepairCB), 6811 std::move(ACSRepairCB))) 6812 return ChangeStatus::CHANGED; 6813 return ChangeStatus::UNCHANGED; 6814 } 6815 6816 /// See AbstractAttribute::trackStatistics() 6817 void trackStatistics() const override { 6818 STATS_DECLTRACK_ARG_ATTR(privatizable_ptr); 6819 } 6820 }; 6821 6822 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl { 6823 AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A) 6824 : AAPrivatizablePtrImpl(IRP, A) {} 6825 6826 /// See AbstractAttribute::initialize(...). 6827 virtual void initialize(Attributor &A) override { 6828 // TODO: We can privatize more than arguments. 6829 indicatePessimisticFixpoint(); 6830 } 6831 6832 ChangeStatus updateImpl(Attributor &A) override { 6833 llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::" 6834 "updateImpl will not be called"); 6835 } 6836 6837 /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...) 6838 Optional<Type *> identifyPrivatizableType(Attributor &A) override { 6839 Value *Obj = getUnderlyingObject(&getAssociatedValue()); 6840 if (!Obj) { 6841 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n"); 6842 return nullptr; 6843 } 6844 6845 if (auto *AI = dyn_cast<AllocaInst>(Obj)) 6846 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 6847 if (CI->isOne()) 6848 return AI->getAllocatedType(); 6849 if (auto *Arg = dyn_cast<Argument>(Obj)) { 6850 auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>( 6851 *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED); 6852 if (PrivArgAA.isAssumedPrivatizablePtr()) 6853 return PrivArgAA.getPrivatizableType(); 6854 } 6855 6856 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid " 6857 "alloca nor privatizable argument: " 6858 << *Obj << "!\n"); 6859 return nullptr; 6860 } 6861 6862 /// See AbstractAttribute::trackStatistics() 6863 void trackStatistics() const override { 6864 STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr); 6865 } 6866 }; 6867 6868 struct AAPrivatizablePtrCallSiteArgument final 6869 : public AAPrivatizablePtrFloating { 6870 AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A) 6871 : AAPrivatizablePtrFloating(IRP, A) {} 6872 6873 /// See AbstractAttribute::initialize(...). 6874 void initialize(Attributor &A) override { 6875 if (getIRPosition().hasAttr(Attribute::ByVal)) 6876 indicateOptimisticFixpoint(); 6877 } 6878 6879 /// See AbstractAttribute::updateImpl(...). 
6880 ChangeStatus updateImpl(Attributor &A) override { 6881 PrivatizableType = identifyPrivatizableType(A); 6882 if (!PrivatizableType) 6883 return ChangeStatus::UNCHANGED; 6884 if (!PrivatizableType.value()) 6885 return indicatePessimisticFixpoint(); 6886 6887 const IRPosition &IRP = getIRPosition(); 6888 auto &NoCaptureAA = 6889 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED); 6890 if (!NoCaptureAA.isAssumedNoCapture()) { 6891 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n"); 6892 return indicatePessimisticFixpoint(); 6893 } 6894 6895 auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED); 6896 if (!NoAliasAA.isAssumedNoAlias()) { 6897 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n"); 6898 return indicatePessimisticFixpoint(); 6899 } 6900 6901 bool IsKnown; 6902 if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) { 6903 LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n"); 6904 return indicatePessimisticFixpoint(); 6905 } 6906 6907 return ChangeStatus::UNCHANGED; 6908 } 6909 6910 /// See AbstractAttribute::trackStatistics() 6911 void trackStatistics() const override { 6912 STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr); 6913 } 6914 }; 6915 6916 struct AAPrivatizablePtrCallSiteReturned final 6917 : public AAPrivatizablePtrFloating { 6918 AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A) 6919 : AAPrivatizablePtrFloating(IRP, A) {} 6920 6921 /// See AbstractAttribute::initialize(...). 6922 void initialize(Attributor &A) override { 6923 // TODO: We can privatize more than arguments. 6924 indicatePessimisticFixpoint(); 6925 } 6926 6927 /// See AbstractAttribute::trackStatistics() 6928 void trackStatistics() const override { 6929 STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr); 6930 } 6931 }; 6932 6933 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating { 6934 AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A) 6935 : AAPrivatizablePtrFloating(IRP, A) {} 6936 6937 /// See AbstractAttribute::initialize(...). 6938 void initialize(Attributor &A) override { 6939 // TODO: We can privatize more than arguments. 6940 indicatePessimisticFixpoint(); 6941 } 6942 6943 /// See AbstractAttribute::trackStatistics() 6944 void trackStatistics() const override { 6945 STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr); 6946 } 6947 }; 6948 } // namespace 6949 6950 /// -------------------- Memory Behavior Attributes ---------------------------- 6951 /// Includes read-none, read-only, and write-only. 6952 /// ---------------------------------------------------------------------------- 6953 namespace { 6954 struct AAMemoryBehaviorImpl : public AAMemoryBehavior { 6955 AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A) 6956 : AAMemoryBehavior(IRP, A) {} 6957 6958 /// See AbstractAttribute::initialize(...). 6959 void initialize(Attributor &A) override { 6960 intersectAssumedBits(BEST_STATE); 6961 getKnownStateFromValue(getIRPosition(), getState()); 6962 AAMemoryBehavior::initialize(A); 6963 } 6964 6965 /// Return the memory behavior information encoded in the IR for \p IRP. 
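///
/// For example (a sketch): an existing `readonly` attribute makes NO_WRITES
/// known, and an instruction that cannot read memory contributes NO_READS,
/// so the deduction below starts from the strongest facts already in the IR.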
6966 static void getKnownStateFromValue(const IRPosition &IRP, 6967 BitIntegerState &State, 6968 bool IgnoreSubsumingPositions = false) { 6969 SmallVector<Attribute, 2> Attrs; 6970 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 6971 for (const Attribute &Attr : Attrs) { 6972 switch (Attr.getKindAsEnum()) { 6973 case Attribute::ReadNone: 6974 State.addKnownBits(NO_ACCESSES); 6975 break; 6976 case Attribute::ReadOnly: 6977 State.addKnownBits(NO_WRITES); 6978 break; 6979 case Attribute::WriteOnly: 6980 State.addKnownBits(NO_READS); 6981 break; 6982 default: 6983 llvm_unreachable("Unexpected attribute!"); 6984 } 6985 } 6986 6987 if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) { 6988 if (!I->mayReadFromMemory()) 6989 State.addKnownBits(NO_READS); 6990 if (!I->mayWriteToMemory()) 6991 State.addKnownBits(NO_WRITES); 6992 } 6993 } 6994 6995 /// See AbstractAttribute::getDeducedAttributes(...). 6996 void getDeducedAttributes(LLVMContext &Ctx, 6997 SmallVectorImpl<Attribute> &Attrs) const override { 6998 assert(Attrs.size() == 0); 6999 if (isAssumedReadNone()) 7000 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7001 else if (isAssumedReadOnly()) 7002 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly)); 7003 else if (isAssumedWriteOnly()) 7004 Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly)); 7005 assert(Attrs.size() <= 1); 7006 } 7007 7008 /// See AbstractAttribute::manifest(...). 7009 ChangeStatus manifest(Attributor &A) override { 7010 if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true)) 7011 return ChangeStatus::UNCHANGED; 7012 7013 const IRPosition &IRP = getIRPosition(); 7014 7015 // Check if we would improve the existing attributes first. 7016 SmallVector<Attribute, 4> DeducedAttrs; 7017 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7018 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7019 return IRP.hasAttr(Attr.getKindAsEnum(), 7020 /* IgnoreSubsumingPositions */ true); 7021 })) 7022 return ChangeStatus::UNCHANGED; 7023 7024 // Clear existing attributes. 7025 IRP.removeAttrs(AttrKinds); 7026 7027 // Use the generic manifest method. 7028 return IRAttribute::manifest(A); 7029 } 7030 7031 /// See AbstractState::getAsStr(). 7032 const std::string getAsStr() const override { 7033 if (isAssumedReadNone()) 7034 return "readnone"; 7035 if (isAssumedReadOnly()) 7036 return "readonly"; 7037 if (isAssumedWriteOnly()) 7038 return "writeonly"; 7039 return "may-read/write"; 7040 } 7041 7042 /// The set of IR attributes AAMemoryBehavior deals with. 7043 static const Attribute::AttrKind AttrKinds[3]; 7044 }; 7045 7046 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = { 7047 Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly}; 7048 7049 /// Memory behavior attribute for a floating value. 7050 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl { 7051 AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A) 7052 : AAMemoryBehaviorImpl(IRP, A) {} 7053 7054 /// See AbstractAttribute::updateImpl(...). 
7055 ChangeStatus updateImpl(Attributor &A) override;
7056
7057 /// See AbstractAttribute::trackStatistics()
7058 void trackStatistics() const override {
7059 if (isAssumedReadNone())
7060 STATS_DECLTRACK_FLOATING_ATTR(readnone)
7061 else if (isAssumedReadOnly())
7062 STATS_DECLTRACK_FLOATING_ATTR(readonly)
7063 else if (isAssumedWriteOnly())
7064 STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7065 }
7066
7067 private:
7068 /// Return true if users of \p UserI might access the underlying
7069 /// variable/location described by \p U and should therefore be analyzed.
7070 bool followUsersOfUseIn(Attributor &A, const Use &U,
7071 const Instruction *UserI);
7072
7073 /// Update the state according to the effect of use \p U in \p UserI.
7074 void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7075 };
7076
7077 /// Memory behavior attribute for function argument.
7078 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7079 AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7080 : AAMemoryBehaviorFloating(IRP, A) {}
7081
7082 /// See AbstractAttribute::initialize(...).
7083 void initialize(Attributor &A) override {
7084 intersectAssumedBits(BEST_STATE);
7085 const IRPosition &IRP = getIRPosition();
7086 // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7087 // can query it when we use has/getAttr. That would allow us to reuse the
7088 // initialize of the base class here.
7089 bool HasByVal =
7090 IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7091 getKnownStateFromValue(IRP, getState(),
7092 /* IgnoreSubsumingPositions */ HasByVal);
7093
7094 // Initialize the use vector with all direct uses of the associated value.
7095 Argument *Arg = getAssociatedArgument();
7096 if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7097 indicatePessimisticFixpoint();
7098 }
7099
7100 ChangeStatus manifest(Attributor &A) override {
7101 // TODO: Pointer arguments are not supported on vectors of pointers yet.
7102 if (!getAssociatedValue().getType()->isPointerTy())
7103 return ChangeStatus::UNCHANGED;
7104
7105 // TODO: From readattrs.ll: "inalloca parameters are always
7106 // considered written"
7107 if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7108 removeKnownBits(NO_WRITES);
7109 removeAssumedBits(NO_WRITES);
7110 }
7111 return AAMemoryBehaviorFloating::manifest(A);
7112 }
7113
7114 /// See AbstractAttribute::trackStatistics()
7115 void trackStatistics() const override {
7116 if (isAssumedReadNone())
7117 STATS_DECLTRACK_ARG_ATTR(readnone)
7118 else if (isAssumedReadOnly())
7119 STATS_DECLTRACK_ARG_ATTR(readonly)
7120 else if (isAssumedWriteOnly())
7121 STATS_DECLTRACK_ARG_ATTR(writeonly)
7122 }
7123 };
7124
7125 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7126 AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7127 : AAMemoryBehaviorArgument(IRP, A) {}
7128
7129 /// See AbstractAttribute::initialize(...).
7130 void initialize(Attributor &A) override {
7131 // If we don't have an associated argument this is either a variadic call
7132 // or an indirect call, either way, nothing to do here.
7133 Argument *Arg = getAssociatedArgument();
7134 if (!Arg) {
7135 indicatePessimisticFixpoint();
7136 return;
7137 }
7138 if (Arg->hasByValAttr()) {
7139 addKnownBits(NO_WRITES);
7140 removeKnownBits(NO_READS);
7141 removeAssumedBits(NO_READS);
7142 }
7143 AAMemoryBehaviorArgument::initialize(A);
7144 if (getAssociatedFunction()->isDeclaration())
7145 indicatePessimisticFixpoint();
7146 }
7147
7148 /// See AbstractAttribute::updateImpl(...).
7149 ChangeStatus updateImpl(Attributor &A) override {
7150 // TODO: Once we have call site specific value information we can provide
7151 // call site specific liveness information and then it makes
7152 // sense to specialize attributes for call site arguments instead of
7153 // redirecting requests to the callee argument.
7154 Argument *Arg = getAssociatedArgument();
7155 const IRPosition &ArgPos = IRPosition::argument(*Arg);
7156 auto &ArgAA =
7157 A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7158 return clampStateAndIndicateChange(getState(), ArgAA.getState());
7159 }
7160
7161 /// See AbstractAttribute::trackStatistics()
7162 void trackStatistics() const override {
7163 if (isAssumedReadNone())
7164 STATS_DECLTRACK_CSARG_ATTR(readnone)
7165 else if (isAssumedReadOnly())
7166 STATS_DECLTRACK_CSARG_ATTR(readonly)
7167 else if (isAssumedWriteOnly())
7168 STATS_DECLTRACK_CSARG_ATTR(writeonly)
7169 }
7170 };
7171
7172 /// Memory behavior attribute for a call site return position.
7173 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7174 AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7175 : AAMemoryBehaviorFloating(IRP, A) {}
7176
7177 /// See AbstractAttribute::initialize(...).
7178 void initialize(Attributor &A) override {
7179 AAMemoryBehaviorImpl::initialize(A);
7180 Function *F = getAssociatedFunction();
7181 if (!F || F->isDeclaration())
7182 indicatePessimisticFixpoint();
7183 }
7184
7185 /// See AbstractAttribute::manifest(...).
7186 ChangeStatus manifest(Attributor &A) override {
7187 // We do not annotate returned values.
7188 return ChangeStatus::UNCHANGED;
7189 }
7190
7191 /// See AbstractAttribute::trackStatistics()
7192 void trackStatistics() const override {}
7193 };
7194
7195 /// An AA to represent the memory behavior function attributes.
7196 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7197 AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7198 : AAMemoryBehaviorImpl(IRP, A) {}
7199
7200 /// See AbstractAttribute::updateImpl(Attributor &A).
7201 virtual ChangeStatus updateImpl(Attributor &A) override;
7202
7203 /// See AbstractAttribute::manifest(...).
7204 ChangeStatus manifest(Attributor &A) override {
7205 Function &F = cast<Function>(getAnchorValue());
7206 if (isAssumedReadNone()) {
7207 F.removeFnAttr(Attribute::ArgMemOnly);
7208 F.removeFnAttr(Attribute::InaccessibleMemOnly);
7209 F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7210 }
7211 return AAMemoryBehaviorImpl::manifest(A);
7212 }
7213
7214 /// See AbstractAttribute::trackStatistics()
7215 void trackStatistics() const override {
7216 if (isAssumedReadNone())
7217 STATS_DECLTRACK_FN_ATTR(readnone)
7218 else if (isAssumedReadOnly())
7219 STATS_DECLTRACK_FN_ATTR(readonly)
7220 else if (isAssumedWriteOnly())
7221 STATS_DECLTRACK_FN_ATTR(writeonly)
7222 }
7223 };
7224
7225 /// AAMemoryBehavior attribute for call sites.
7226 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7227 AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7228 : AAMemoryBehaviorImpl(IRP, A) {}
7229
7230 /// See AbstractAttribute::initialize(...).
7231 void initialize(Attributor &A) override {
7232 AAMemoryBehaviorImpl::initialize(A);
7233 Function *F = getAssociatedFunction();
7234 if (!F || F->isDeclaration())
7235 indicatePessimisticFixpoint();
7236 }
7237
7238 /// See AbstractAttribute::updateImpl(...).
7239 ChangeStatus updateImpl(Attributor &A) override {
7240 // TODO: Once we have call site specific value information we can provide
7241 // call site specific liveness information and then it makes
7242 // sense to specialize attributes for call site arguments instead of
7243 // redirecting requests to the callee argument.
7244 Function *F = getAssociatedFunction();
7245 const IRPosition &FnPos = IRPosition::function(*F);
7246 auto &FnAA =
7247 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7248 return clampStateAndIndicateChange(getState(), FnAA.getState());
7249 }
7250
7251 /// See AbstractAttribute::trackStatistics()
7252 void trackStatistics() const override {
7253 if (isAssumedReadNone())
7254 STATS_DECLTRACK_CS_ATTR(readnone)
7255 else if (isAssumedReadOnly())
7256 STATS_DECLTRACK_CS_ATTR(readonly)
7257 else if (isAssumedWriteOnly())
7258 STATS_DECLTRACK_CS_ATTR(writeonly)
7259 }
7260 };
7261
7262 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7263
7264 // The current assumed state used to determine a change.
7265 auto AssumedState = getAssumed();
7266
7267 auto CheckRWInst = [&](Instruction &I) {
7268 // If the instruction has its own memory behavior state, use it to restrict
7269 // the local state. No further analysis is required as the other memory
7270 // state is as optimistic as it gets.
7271 if (const auto *CB = dyn_cast<CallBase>(&I)) {
7272 const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7273 *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7274 intersectAssumedBits(MemBehaviorAA.getAssumed());
7275 return !isAtFixpoint();
7276 }
7277
7278 // Remove access kind modifiers if necessary.
7279 if (I.mayReadFromMemory())
7280 removeAssumedBits(NO_READS);
7281 if (I.mayWriteToMemory())
7282 removeAssumedBits(NO_WRITES);
7283 return !isAtFixpoint();
7284 };
7285
7286 bool UsedAssumedInformation = false;
7287 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7288 UsedAssumedInformation))
7289 return indicatePessimisticFixpoint();
7290
7291 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7292 : ChangeStatus::UNCHANGED;
7293 }
7294
7295 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7296
7297 const IRPosition &IRP = getIRPosition();
7298 const IRPosition &FnPos = IRPosition::function_scope(IRP);
7299 AAMemoryBehavior::StateType &S = getState();
7300
7301 // First, check the function scope. We take the known information and we avoid
7302 // work if the assumed information implies the current assumed information for
7303 // this attribute. This is valid for all but byval arguments.
7304 Argument *Arg = IRP.getAssociatedArgument();
7305 AAMemoryBehavior::base_t FnMemAssumedState =
7306 AAMemoryBehavior::StateType::getWorstState();
7307 if (!Arg || !Arg->hasByValAttr()) {
7308 const auto &FnMemAA =
7309 A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7310 FnMemAssumedState = FnMemAA.getAssumed();
7311 S.addKnownBits(FnMemAA.getKnown());
7312 if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7313 return ChangeStatus::UNCHANGED;
7314 }
7315
7316 // The current assumed state used to determine a change.
7317 auto AssumedState = S.getAssumed();
7318
7319 // Make sure the value is not captured (except through "return"), if
7320 // it is, any information derived would be irrelevant anyway as we cannot
7321 // check the potential aliases introduced by the capture. However, no need
7322 // to fall back to anything less optimistic than the function state.
7323 const auto &ArgNoCaptureAA =
7324 A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7325 if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7326 S.intersectAssumedBits(FnMemAssumedState);
7327 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7328 : ChangeStatus::UNCHANGED;
7329 }
7330
7331 // Visit and expand uses until all are analyzed or a fixpoint is reached.
7332 auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7333 Instruction *UserI = cast<Instruction>(U.getUser());
7334 LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7335 << " \n");
7336
7337 // Droppable users, e.g., llvm::assume, do not actually perform any action.
7338 if (UserI->isDroppable())
7339 return true;
7340
7341 // Check if the users of UserI should also be visited.
7342 Follow = followUsersOfUseIn(A, U, UserI);
7343
7344 // If UserI might touch memory we analyze the use in detail.
7345 if (UserI->mayReadOrWriteMemory())
7346 analyzeUseIn(A, U, UserI);
7347
7348 return !isAtFixpoint();
7349 };
7350
7351 if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7352 return indicatePessimisticFixpoint();
7353
7354 return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7355 : ChangeStatus::UNCHANGED;
7356 }
7357
7358 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7359 const Instruction *UserI) {
7360 // The loaded value is unrelated to the pointer argument, no need to
7361 // follow the users of the load.
7362 if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
7363 return false;
7364
7365 // By default we follow all uses assuming UserI might leak information on U,
7366 // we have special handling for call site operands though.
7367 const auto *CB = dyn_cast<CallBase>(UserI);
7368 if (!CB || !CB->isArgOperand(&U))
7369 return true;
7370
7371 // If the use is a call argument known not to be captured, the users of
7372 // the call do not need to be visited because they have to be unrelated to
7373 // the input. Note that this check is not trivial even though we disallow
7374 // general capturing of the underlying argument. The reason is that the
7375 // call might capture the argument "through return", which we allow and for
7376 // which we need to check call users.
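// Hypothetical example: for `%q = call i8* @identity(i8* %p)` where
// @identity returns its argument, %p is at best nocapture-maybe-returned,
// so the users of the call must still be visited since they may access the
// memory %p points to.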
7377 if (U.get()->getType()->isPointerTy()) { 7378 unsigned ArgNo = CB->getArgOperandNo(&U); 7379 const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>( 7380 *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL); 7381 return !ArgNoCaptureAA.isAssumedNoCapture(); 7382 } 7383 7384 return true; 7385 } 7386 7387 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U, 7388 const Instruction *UserI) { 7389 assert(UserI->mayReadOrWriteMemory()); 7390 7391 switch (UserI->getOpcode()) { 7392 default: 7393 // TODO: Handle all atomics and other side-effect operations we know of. 7394 break; 7395 case Instruction::Load: 7396 // Loads cause the NO_READS property to disappear. 7397 removeAssumedBits(NO_READS); 7398 return; 7399 7400 case Instruction::Store: 7401 // Stores cause the NO_WRITES property to disappear if the use is the 7402 // pointer operand. Note that while capturing was taken care of somewhere 7403 // else we need to deal with stores of the value that is not looked through. 7404 if (cast<StoreInst>(UserI)->getPointerOperand() == U.get()) 7405 removeAssumedBits(NO_WRITES); 7406 else 7407 indicatePessimisticFixpoint(); 7408 return; 7409 7410 case Instruction::Call: 7411 case Instruction::CallBr: 7412 case Instruction::Invoke: { 7413 // For call sites we look at the argument memory behavior attribute (this 7414 // could be recursive!) in order to restrict our own state. 7415 const auto *CB = cast<CallBase>(UserI); 7416 7417 // Give up on operand bundles. 7418 if (CB->isBundleOperand(&U)) { 7419 indicatePessimisticFixpoint(); 7420 return; 7421 } 7422 7423 // Calling a function does read the function pointer, maybe write it if the 7424 // function is self-modifying. 7425 if (CB->isCallee(&U)) { 7426 removeAssumedBits(NO_READS); 7427 break; 7428 } 7429 7430 // Adjust the possible access behavior based on the information on the 7431 // argument. 7432 IRPosition Pos; 7433 if (U.get()->getType()->isPointerTy()) 7434 Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 7435 else 7436 Pos = IRPosition::callsite_function(*CB); 7437 const auto &MemBehaviorAA = 7438 A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL); 7439 // "assumed" has at most the same bits as the MemBehaviorAA assumed 7440 // and at least "known". 7441 intersectAssumedBits(MemBehaviorAA.getAssumed()); 7442 return; 7443 } 7444 }; 7445 7446 // Generally, look at the "may-properties" and adjust the assumed state if we 7447 // did not trigger special handling before. 
7448 if (UserI->mayReadFromMemory()) 7449 removeAssumedBits(NO_READS); 7450 if (UserI->mayWriteToMemory()) 7451 removeAssumedBits(NO_WRITES); 7452 } 7453 } // namespace 7454 7455 /// -------------------- Memory Locations Attributes --------------------------- 7456 /// Includes read-none, argmemonly, inaccessiblememonly, 7457 /// inaccessiblememorargmemonly 7458 /// ---------------------------------------------------------------------------- 7459 7460 std::string AAMemoryLocation::getMemoryLocationsAsStr( 7461 AAMemoryLocation::MemoryLocationsKind MLK) { 7462 if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS)) 7463 return "all memory"; 7464 if (MLK == AAMemoryLocation::NO_LOCATIONS) 7465 return "no memory"; 7466 std::string S = "memory:"; 7467 if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM)) 7468 S += "stack,"; 7469 if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM)) 7470 S += "constant,"; 7471 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM)) 7472 S += "internal global,"; 7473 if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM)) 7474 S += "external global,"; 7475 if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM)) 7476 S += "argument,"; 7477 if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM)) 7478 S += "inaccessible,"; 7479 if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM)) 7480 S += "malloced,"; 7481 if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM)) 7482 S += "unknown,"; 7483 S.pop_back(); 7484 return S; 7485 } 7486 7487 namespace { 7488 struct AAMemoryLocationImpl : public AAMemoryLocation { 7489 7490 AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A) 7491 : AAMemoryLocation(IRP, A), Allocator(A.Allocator) { 7492 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7493 AccessKind2Accesses[u] = nullptr; 7494 } 7495 7496 ~AAMemoryLocationImpl() { 7497 // The AccessSets are allocated via a BumpPtrAllocator, we call 7498 // the destructor manually. 7499 for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u) 7500 if (AccessKind2Accesses[u]) 7501 AccessKind2Accesses[u]->~AccessSet(); 7502 } 7503 7504 /// See AbstractAttribute::initialize(...). 7505 void initialize(Attributor &A) override { 7506 intersectAssumedBits(BEST_STATE); 7507 getKnownStateFromValue(A, getIRPosition(), getState()); 7508 AAMemoryLocation::initialize(A); 7509 } 7510 7511 /// Return the memory behavior information encoded in the IR for \p IRP. 7512 static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP, 7513 BitIntegerState &State, 7514 bool IgnoreSubsumingPositions = false) { 7515 // For internal functions we ignore `argmemonly` and 7516 // `inaccessiblememorargmemonly` as we might break it via interprocedural 7517 // constant propagation. It is unclear if this is the best way but it is 7518 // unlikely this will cause real performance problems. If we are deriving 7519 // attributes for the anchor function we even remove the attribute in 7520 // addition to ignoring it. 
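// Hypothetical example: an internal `argmemonly` function whose pointer
// argument gets replaced by a global during interprocedural constant
// propagation no longer accesses only argument memory, so the stale
// attribute cannot be trusted (and is dropped below for the anchor
// function).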
7521 bool UseArgMemOnly = true; 7522 Function *AnchorFn = IRP.getAnchorScope(); 7523 if (AnchorFn && A.isRunOn(*AnchorFn)) 7524 UseArgMemOnly = !AnchorFn->hasLocalLinkage(); 7525 7526 SmallVector<Attribute, 2> Attrs; 7527 IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions); 7528 for (const Attribute &Attr : Attrs) { 7529 switch (Attr.getKindAsEnum()) { 7530 case Attribute::ReadNone: 7531 State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM); 7532 break; 7533 case Attribute::InaccessibleMemOnly: 7534 State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 7535 break; 7536 case Attribute::ArgMemOnly: 7537 if (UseArgMemOnly) 7538 State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true)); 7539 else 7540 IRP.removeAttrs({Attribute::ArgMemOnly}); 7541 break; 7542 case Attribute::InaccessibleMemOrArgMemOnly: 7543 if (UseArgMemOnly) 7544 State.addKnownBits(inverseLocation( 7545 NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 7546 else 7547 IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly}); 7548 break; 7549 default: 7550 llvm_unreachable("Unexpected attribute!"); 7551 } 7552 } 7553 } 7554 7555 /// See AbstractAttribute::getDeducedAttributes(...). 7556 void getDeducedAttributes(LLVMContext &Ctx, 7557 SmallVectorImpl<Attribute> &Attrs) const override { 7558 assert(Attrs.size() == 0); 7559 if (isAssumedReadNone()) { 7560 Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone)); 7561 } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) { 7562 if (isAssumedInaccessibleMemOnly()) 7563 Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly)); 7564 else if (isAssumedArgMemOnly()) 7565 Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly)); 7566 else if (isAssumedInaccessibleOrArgMemOnly()) 7567 Attrs.push_back( 7568 Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly)); 7569 } 7570 assert(Attrs.size() <= 1); 7571 } 7572 7573 /// See AbstractAttribute::manifest(...). 7574 ChangeStatus manifest(Attributor &A) override { 7575 const IRPosition &IRP = getIRPosition(); 7576 7577 // Check if we would improve the existing attributes first. 7578 SmallVector<Attribute, 4> DeducedAttrs; 7579 getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs); 7580 if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) { 7581 return IRP.hasAttr(Attr.getKindAsEnum(), 7582 /* IgnoreSubsumingPositions */ true); 7583 })) 7584 return ChangeStatus::UNCHANGED; 7585 7586 // Clear existing attributes. 7587 IRP.removeAttrs(AttrKinds); 7588 if (isAssumedReadNone()) 7589 IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds); 7590 7591 // Use the generic manifest method. 7592 return IRAttribute::manifest(A); 7593 } 7594 7595 /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...). 
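///
/// A usage sketch (mirroring the global-memory handling further below in
/// this file): passing inverseLocation(NO_GLOBAL_MEM, false, false) visits
/// every recorded access to global memory, and the walk returns false as
/// soon as \p Pred rejects one access.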
7596 bool checkForAllAccessesToMemoryKind( 7597 function_ref<bool(const Instruction *, const Value *, AccessKind, 7598 MemoryLocationsKind)> 7599 Pred, 7600 MemoryLocationsKind RequestedMLK) const override { 7601 if (!isValidState()) 7602 return false; 7603 7604 MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation(); 7605 if (AssumedMLK == NO_LOCATIONS) 7606 return true; 7607 7608 unsigned Idx = 0; 7609 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; 7610 CurMLK *= 2, ++Idx) { 7611 if (CurMLK & RequestedMLK) 7612 continue; 7613 7614 if (const AccessSet *Accesses = AccessKind2Accesses[Idx]) 7615 for (const AccessInfo &AI : *Accesses) 7616 if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK)) 7617 return false; 7618 } 7619 7620 return true; 7621 } 7622 7623 ChangeStatus indicatePessimisticFixpoint() override { 7624 // If we give up and indicate a pessimistic fixpoint this instruction will 7625 // become an access for all potential access kinds: 7626 // TODO: Add pointers for argmemonly and globals to improve the results of 7627 // checkForAllAccessesToMemoryKind. 7628 bool Changed = false; 7629 MemoryLocationsKind KnownMLK = getKnown(); 7630 Instruction *I = dyn_cast<Instruction>(&getAssociatedValue()); 7631 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) 7632 if (!(CurMLK & KnownMLK)) 7633 updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed, 7634 getAccessKindFromInst(I)); 7635 return AAMemoryLocation::indicatePessimisticFixpoint(); 7636 } 7637 7638 protected: 7639 /// Helper struct to tie together an instruction that has a read or write 7640 /// effect with the pointer it accesses (if any). 7641 struct AccessInfo { 7642 7643 /// The instruction that caused the access. 7644 const Instruction *I; 7645 7646 /// The base pointer that is accessed, or null if unknown. 7647 const Value *Ptr; 7648 7649 /// The kind of access (read/write/read+write). 7650 AccessKind Kind; 7651 7652 bool operator==(const AccessInfo &RHS) const { 7653 return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind; 7654 } 7655 bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const { 7656 if (LHS.I != RHS.I) 7657 return LHS.I < RHS.I; 7658 if (LHS.Ptr != RHS.Ptr) 7659 return LHS.Ptr < RHS.Ptr; 7660 if (LHS.Kind != RHS.Kind) 7661 return LHS.Kind < RHS.Kind; 7662 return false; 7663 } 7664 }; 7665 7666 /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the 7667 /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind. 7668 using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>; 7669 AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()]; 7670 7671 /// Categorize the pointer arguments of CB that might access memory in 7672 /// AccessedLoc and update the state and access map accordingly. 7673 void 7674 categorizeArgumentPointerLocations(Attributor &A, CallBase &CB, 7675 AAMemoryLocation::StateType &AccessedLocs, 7676 bool &Changed); 7677 7678 /// Return the kind(s) of location that may be accessed by \p V. 7679 AAMemoryLocation::MemoryLocationsKind 7680 categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed); 7681 7682 /// Return the access kind as determined by \p I. 7683 AccessKind getAccessKindFromInst(const Instruction *I) { 7684 AccessKind AK = READ_WRITE; 7685 if (I) { 7686 AK = I->mayReadFromMemory() ? READ : NONE; 7687 AK = AccessKind(AK | (I->mayWriteToMemory() ? 
WRITE : NONE));
7688 }
7689 return AK;
7690 }
7691
7692 /// Update the state \p State and the AccessKind2Accesses given that \p I is
7693 /// an access of kind \p AK to a \p MLK memory location with the access
7694 /// pointer \p Ptr.
7695 void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7696 MemoryLocationsKind MLK, const Instruction *I,
7697 const Value *Ptr, bool &Changed,
7698 AccessKind AK = READ_WRITE) {
7699
7700 assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7701 auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7702 if (!Accesses)
7703 Accesses = new (Allocator) AccessSet();
7704 Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7705 State.removeAssumedBits(MLK);
7706 }
7707
7708 /// Determine the underlying location kinds for \p Ptr, e.g., globals or
7709 /// arguments, and update the state and access map accordingly.
7710 void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7711 AAMemoryLocation::StateType &State, bool &Changed);
7712
7713 /// Used to allocate access sets.
7714 BumpPtrAllocator &Allocator;
7715
7716 /// The set of IR attributes AAMemoryLocation deals with.
7717 static const Attribute::AttrKind AttrKinds[4];
7718 };
7719
7720 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7721 Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7722 Attribute::InaccessibleMemOrArgMemOnly};
7723
7724 void AAMemoryLocationImpl::categorizePtrValue(
7725 Attributor &A, const Instruction &I, const Value &Ptr,
7726 AAMemoryLocation::StateType &State, bool &Changed) {
7727 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7728 << Ptr << " ["
7729 << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7730
7731 SmallSetVector<Value *, 8> Objects;
7732 bool UsedAssumedInformation = false;
7733 if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7734 UsedAssumedInformation,
7735 AA::Intraprocedural)) {
7736 LLVM_DEBUG(
7737 dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7738 updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7739 getAccessKindFromInst(&I));
7740 return;
7741 }
7742
7743 for (Value *Obj : Objects) {
7744 // TODO: recognize the TBAA used for constant accesses.
7745 MemoryLocationsKind MLK = NO_LOCATIONS;
7746 if (isa<UndefValue>(Obj))
7747 continue;
7748 if (isa<Argument>(Obj)) {
7749 // TODO: For now we do not treat byval arguments as local copies performed
7750 // on the call edge, though, we should. To make that happen we need to
7751 // teach various passes, e.g., DSE, about the copy effect of a byval. That
7752 // would also allow us to mark functions only accessing byval arguments as
7753 // readnone again, arguably their accesses have no effect outside of the
7754 // function, like accesses to allocas.
7755 MLK = NO_ARGUMENT_MEM;
7756 } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7757 // Reading constant memory is not treated as a read "effect" by the
7758 // function attr pass so we won't either. Constants defined by TBAA are
7759 // similar. (We know we do not write it because it is constant.)
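// Hypothetical example: a load of `@g = internal constant i32 7` is
// skipped by the check below, mirroring the function-attrs treatment of
// constant memory.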
7760 if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7761 if (GVar->isConstant())
7762 continue;
7763
7764 if (GV->hasLocalLinkage())
7765 MLK = NO_GLOBAL_INTERNAL_MEM;
7766 else
7767 MLK = NO_GLOBAL_EXTERNAL_MEM;
7768 } else if (isa<ConstantPointerNull>(Obj) &&
7769 !NullPointerIsDefined(getAssociatedFunction(),
7770 Ptr.getType()->getPointerAddressSpace())) {
7771 continue;
7772 } else if (isa<AllocaInst>(Obj)) {
7773 MLK = NO_LOCAL_MEM;
7774 } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7775 const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7776 *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7777 if (NoAliasAA.isAssumedNoAlias())
7778 MLK = NO_MALLOCED_MEM;
7779 else
7780 MLK = NO_UNKOWN_MEM;
7781 } else {
7782 MLK = NO_UNKOWN_MEM;
7783 }
7784
7785 assert(MLK != NO_LOCATIONS && "No location specified!");
7786 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7787 << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7788 << "\n");
7789 updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7790 getAccessKindFromInst(&I));
7791 }
7792
7793 LLVM_DEBUG(
7794 dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7795 << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7796 }
7797
7798 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7799 Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7800 bool &Changed) {
7801 for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7802
7803 // Skip non-pointer arguments.
7804 const Value *ArgOp = CB.getArgOperand(ArgNo);
7805 if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7806 continue;
7807
7808 // Skip readnone arguments.
7809 const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7810 const auto &ArgOpMemLocationAA =
7811 A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7812
7813 if (ArgOpMemLocationAA.isAssumedReadNone())
7814 continue;
7815
7816 // Categorize potentially accessed pointer arguments as if there was an
7817 // access instruction with them as pointer.
7818 categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7819 }
7820 }
7821
7822 AAMemoryLocation::MemoryLocationsKind
7823 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7824 bool &Changed) {
7825 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7826 << I << "\n");
7827
7828 AAMemoryLocation::StateType AccessedLocs;
7829 AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7830
7831 if (auto *CB = dyn_cast<CallBase>(&I)) {
7832
7833 // First check if we assume any memory access is visible.
7834 const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7835 *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7836 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7837 << " [" << CBMemLocationAA << "]\n");
7838
7839 if (CBMemLocationAA.isAssumedReadNone())
7840 return NO_LOCATIONS;
7841
7842 if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7843 updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7844 Changed, getAccessKindFromInst(&I));
7845 return AccessedLocs.getAssumed();
7846 }
7847
7848 uint32_t CBAssumedNotAccessedLocs =
7849 CBMemLocationAA.getAssumedNotAccessedLocation();
7850
7851 // Set the argmemonly and global bit as we handle them separately below.
7852 uint32_t CBAssumedNotAccessedLocsNoArgMem = 7853 CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM; 7854 7855 for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) { 7856 if (CBAssumedNotAccessedLocsNoArgMem & CurMLK) 7857 continue; 7858 updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed, 7859 getAccessKindFromInst(&I)); 7860 } 7861 7862 // Now handle global memory if it might be accessed. This is slightly tricky 7863 // as NO_GLOBAL_MEM has multiple bits set. 7864 bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM); 7865 if (HasGlobalAccesses) { 7866 auto AccessPred = [&](const Instruction *, const Value *Ptr, 7867 AccessKind Kind, MemoryLocationsKind MLK) { 7868 updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed, 7869 getAccessKindFromInst(&I)); 7870 return true; 7871 }; 7872 if (!CBMemLocationAA.checkForAllAccessesToMemoryKind( 7873 AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false))) 7874 return AccessedLocs.getWorstState(); 7875 } 7876 7877 LLVM_DEBUG( 7878 dbgs() << "[AAMemoryLocation] Accessed state before argument handling: " 7879 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7880 7881 // Now handle argument memory if it might be accessed. 7882 bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM); 7883 if (HasArgAccesses) 7884 categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed); 7885 7886 LLVM_DEBUG( 7887 dbgs() << "[AAMemoryLocation] Accessed state after argument handling: " 7888 << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n"); 7889 7890 return AccessedLocs.getAssumed(); 7891 } 7892 7893 if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) { 7894 LLVM_DEBUG( 7895 dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: " 7896 << I << " [" << *Ptr << "]\n"); 7897 categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed); 7898 return AccessedLocs.getAssumed(); 7899 } 7900 7901 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: " 7902 << I << "\n"); 7903 updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed, 7904 getAccessKindFromInst(&I)); 7905 return AccessedLocs.getAssumed(); 7906 } 7907 7908 /// An AA to represent the memory behavior function attributes. 7909 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl { 7910 AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A) 7911 : AAMemoryLocationImpl(IRP, A) {} 7912 7913 /// See AbstractAttribute::updateImpl(Attributor &A). 7914 virtual ChangeStatus updateImpl(Attributor &A) override { 7915 7916 const auto &MemBehaviorAA = 7917 A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE); 7918 if (MemBehaviorAA.isAssumedReadNone()) { 7919 if (MemBehaviorAA.isKnownReadNone()) 7920 return indicateOptimisticFixpoint(); 7921 assert(isAssumedReadNone() && 7922 "AAMemoryLocation was not read-none but AAMemoryBehavior was!"); 7923 A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL); 7924 return ChangeStatus::UNCHANGED; 7925 } 7926 7927 // The current assumed state used to determine a change. 
7928 auto AssumedState = getAssumed(); 7929 bool Changed = false; 7930 7931 auto CheckRWInst = [&](Instruction &I) { 7932 MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed); 7933 LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I 7934 << ": " << getMemoryLocationsAsStr(MLK) << "\n"); 7935 removeAssumedBits(inverseLocation(MLK, false, false)); 7936 // Stop once only the valid bit is set in the *not assumed location*, thus 7937 // once we don't actually exclude any memory locations in the state. 7938 return getAssumedNotAccessedLocation() != VALID_STATE; 7939 }; 7940 7941 bool UsedAssumedInformation = false; 7942 if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this, 7943 UsedAssumedInformation)) 7944 return indicatePessimisticFixpoint(); 7945 7946 Changed |= AssumedState != getAssumed(); 7947 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 7948 } 7949 7950 /// See AbstractAttribute::trackStatistics() 7951 void trackStatistics() const override { 7952 if (isAssumedReadNone()) 7953 STATS_DECLTRACK_FN_ATTR(readnone) 7954 else if (isAssumedArgMemOnly()) 7955 STATS_DECLTRACK_FN_ATTR(argmemonly) 7956 else if (isAssumedInaccessibleMemOnly()) 7957 STATS_DECLTRACK_FN_ATTR(inaccessiblememonly) 7958 else if (isAssumedInaccessibleOrArgMemOnly()) 7959 STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly) 7960 } 7961 }; 7962 7963 /// AAMemoryLocation attribute for call sites. 7964 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl { 7965 AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A) 7966 : AAMemoryLocationImpl(IRP, A) {} 7967 7968 /// See AbstractAttribute::initialize(...). 7969 void initialize(Attributor &A) override { 7970 AAMemoryLocationImpl::initialize(A); 7971 Function *F = getAssociatedFunction(); 7972 if (!F || F->isDeclaration()) 7973 indicatePessimisticFixpoint(); 7974 } 7975 7976 /// See AbstractAttribute::updateImpl(...). 7977 ChangeStatus updateImpl(Attributor &A) override { 7978 // TODO: Once we have call site specific value information we can provide 7979 // call site specific liveness information and then it makes 7980 // sense to specialize attributes for call site arguments instead of 7981 // redirecting requests to the callee argument. 7982 Function *F = getAssociatedFunction(); 7983 const IRPosition &FnPos = IRPosition::function(*F); 7984 auto &FnAA = 7985 A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED); 7986 bool Changed = false; 7987 auto AccessPred = [&](const Instruction *I, const Value *Ptr, 7988 AccessKind Kind, MemoryLocationsKind MLK) { 7989 updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed, 7990 getAccessKindFromInst(I)); 7991 return true; 7992 }; 7993 if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS)) 7994 return indicatePessimisticFixpoint(); 7995 return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED; 7996 } 7997 7998 /// See AbstractAttribute::trackStatistics() 7999 void trackStatistics() const override { 8000 if (isAssumedReadNone()) 8001 STATS_DECLTRACK_CS_ATTR(readnone) 8002 } 8003 }; 8004 } // namespace 8005 8006 /// ------------------ Value Constant Range Attribute ------------------------- 8007 8008 namespace { 8009 struct AAValueConstantRangeImpl : AAValueConstantRange { 8010 using StateType = IntegerRangeState; 8011 AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A) 8012 : AAValueConstantRange(IRP, A) {} 8013 8014 /// See AbstractAttribute::initialize(..).
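/// Seeds the state from outside analyses, e.g., if SCEV derives [0, 8) and
/// LVI derives [2, 16) for the associated value, both are intersected into
/// the state and the fixpoint iteration starts from [2, 8).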
8015 void initialize(Attributor &A) override { 8016 if (A.hasSimplificationCallback(getIRPosition())) { 8017 indicatePessimisticFixpoint(); 8018 return; 8019 } 8020 8021 // Intersect a range given by SCEV. 8022 intersectKnown(getConstantRangeFromSCEV(A, getCtxI())); 8023 8024 // Intersect a range given by LVI. 8025 intersectKnown(getConstantRangeFromLVI(A, getCtxI())); 8026 } 8027 8028 /// See AbstractAttribute::getAsStr(). 8029 const std::string getAsStr() const override { 8030 std::string Str; 8031 llvm::raw_string_ostream OS(Str); 8032 OS << "range(" << getBitWidth() << ")<"; 8033 getKnown().print(OS); 8034 OS << " / "; 8035 getAssumed().print(OS); 8036 OS << ">"; 8037 return OS.str(); 8038 } 8039 8040 /// Helper function to get a SCEV expr for the associated value at program 8041 /// point \p I. 8042 const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const { 8043 if (!getAnchorScope()) 8044 return nullptr; 8045 8046 ScalarEvolution *SE = 8047 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 8048 *getAnchorScope()); 8049 8050 LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>( 8051 *getAnchorScope()); 8052 8053 if (!SE || !LI) 8054 return nullptr; 8055 8056 const SCEV *S = SE->getSCEV(&getAssociatedValue()); 8057 if (!I) 8058 return S; 8059 8060 return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent())); 8061 } 8062 8063 /// Helper function to get a range from SCEV for the associated value at 8064 /// program point \p I. 8065 ConstantRange getConstantRangeFromSCEV(Attributor &A, 8066 const Instruction *I = nullptr) const { 8067 if (!getAnchorScope()) 8068 return getWorstState(getBitWidth()); 8069 8070 ScalarEvolution *SE = 8071 A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>( 8072 *getAnchorScope()); 8073 8074 const SCEV *S = getSCEV(A, I); 8075 if (!SE || !S) 8076 return getWorstState(getBitWidth()); 8077 8078 return SE->getUnsignedRange(S); 8079 } 8080 8081 /// Helper function to get a range from LVI for the associated value at 8082 /// program point \p I. 8083 ConstantRange 8084 getConstantRangeFromLVI(Attributor &A, 8085 const Instruction *CtxI = nullptr) const { 8086 if (!getAnchorScope()) 8087 return getWorstState(getBitWidth()); 8088 8089 LazyValueInfo *LVI = 8090 A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>( 8091 *getAnchorScope()); 8092 8093 if (!LVI || !CtxI) 8094 return getWorstState(getBitWidth()); 8095 return LVI->getConstantRange(&getAssociatedValue(), 8096 const_cast<Instruction *>(CtxI)); 8097 } 8098 8099 /// Return true if \p CtxI is valid for querying outside analyses. 8100 /// This basically makes sure we do not ask intra-procedural analysis 8101 /// about a context in the wrong function or a context that violates 8102 /// dominance assumptions they might have. The \p AllowAACtxI flag indicates 8103 /// if the original context of this AA is OK or should be considered invalid. 8104 bool isValidCtxInstructionForOutsideAnalysis(Attributor &A, 8105 const Instruction *CtxI, 8106 bool AllowAACtxI) const { 8107 if (!CtxI || (!AllowAACtxI && CtxI == getCtxI())) 8108 return false; 8109 8110 // Our context might be in a different function; neither intra-procedural 8111 // analysis (ScalarEvolution nor LazyValueInfo) can handle that. 8112 if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction())) 8113 return false; 8114 8115 // If the context is not dominated by the value there are paths to the 8116 // context that do not define the value.
This cannot be handled by 8117 // LazyValueInfo so we need to bail. 8118 if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) { 8119 InformationCache &InfoCache = A.getInfoCache(); 8120 const DominatorTree *DT = 8121 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>( 8122 *I->getFunction()); 8123 return DT && DT->dominates(I, CtxI); 8124 } 8125 8126 return true; 8127 } 8128 8129 /// See AAValueConstantRange::getKnownConstantRange(..). 8130 ConstantRange 8131 getKnownConstantRange(Attributor &A, 8132 const Instruction *CtxI = nullptr) const override { 8133 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI, 8134 /* AllowAACtxI */ false)) 8135 return getKnown(); 8136 8137 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 8138 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 8139 return getKnown().intersectWith(SCEVR).intersectWith(LVIR); 8140 } 8141 8142 /// See AAValueConstantRange::getAssumedConstantRange(..). 8143 ConstantRange 8144 getAssumedConstantRange(Attributor &A, 8145 const Instruction *CtxI = nullptr) const override { 8146 // TODO: Make SCEV use Attributor assumption. 8147 // We may be able to bound a variable range via assumptions in 8148 // Attributor. ex.) If x is assumed to be in [1, 3] and y is known to 8149 // evolve to x^2 + x, then we can say that y is in [2, 12]. 8150 if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI, 8151 /* AllowAACtxI */ false)) 8152 return getAssumed(); 8153 8154 ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI); 8155 ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI); 8156 return getAssumed().intersectWith(SCEVR).intersectWith(LVIR); 8157 } 8158 8159 /// Helper function to create MDNode for range metadata. 8160 static MDNode * 8161 getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx, 8162 const ConstantRange &AssumedConstantRange) { 8163 Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get( 8164 Ty, AssumedConstantRange.getLower())), 8165 ConstantAsMetadata::get(ConstantInt::get( 8166 Ty, AssumedConstantRange.getUpper()))}; 8167 return MDNode::get(Ctx, LowAndHigh); 8168 } 8169 8170 /// Return true if \p Assumed is included in \p KnownRanges. 8171 static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) { 8172 8173 if (Assumed.isFullSet()) 8174 return false; 8175 8176 if (!KnownRanges) 8177 return true; 8178 8179 // If multiple ranges are annotated in IR, we give up annotating the 8180 // assumed range for now. 8181 8182 // TODO: If there exists a known range which contains the assumed range, we 8183 // can say the assumed range is better. 8184 if (KnownRanges->getNumOperands() > 2) 8185 return false; 8186 8187 ConstantInt *Lower = 8188 mdconst::extract<ConstantInt>(KnownRanges->getOperand(0)); 8189 ConstantInt *Upper = 8190 mdconst::extract<ConstantInt>(KnownRanges->getOperand(1)); 8191 8192 ConstantRange Known(Lower->getValue(), Upper->getValue()); 8193 return Known.contains(Assumed) && Known != Assumed; 8194 } 8195 8196 /// Helper function to set range metadata.
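/// Returns true iff \p I was annotated, i.e., iff \p AssumedConstantRange
/// is non-empty and strictly better than the existing !range metadata.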
8197 static bool 8198 setRangeMetadataIfisBetterRange(Instruction *I, 8199 const ConstantRange &AssumedConstantRange) { 8200 auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range); 8201 if (isBetterRange(AssumedConstantRange, OldRangeMD)) { 8202 if (!AssumedConstantRange.isEmptySet()) { 8203 I->setMetadata(LLVMContext::MD_range, 8204 getMDNodeForConstantRange(I->getType(), I->getContext(), 8205 AssumedConstantRange)); 8206 return true; 8207 } 8208 } 8209 return false; 8210 } 8211 8212 /// See AbstractAttribute::manifest() 8213 ChangeStatus manifest(Attributor &A) override { 8214 ChangeStatus Changed = ChangeStatus::UNCHANGED; 8215 ConstantRange AssumedConstantRange = getAssumedConstantRange(A); 8216 assert(!AssumedConstantRange.isFullSet() && "Invalid state"); 8217 8218 auto &V = getAssociatedValue(); 8219 if (!AssumedConstantRange.isEmptySet() && 8220 !AssumedConstantRange.isSingleElement()) { 8221 if (Instruction *I = dyn_cast<Instruction>(&V)) { 8222 assert(I == getCtxI() && "Should not annotate an instruction which is " 8223 "not the context instruction"); 8224 if (isa<CallInst>(I) || isa<LoadInst>(I)) 8225 if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange)) 8226 Changed = ChangeStatus::CHANGED; 8227 } 8228 } 8229 8230 return Changed; 8231 } 8232 }; 8233 8234 struct AAValueConstantRangeArgument final 8235 : AAArgumentFromCallSiteArguments< 8236 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8237 true /* BridgeCallBaseContext */> { 8238 using Base = AAArgumentFromCallSiteArguments< 8239 AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState, 8240 true /* BridgeCallBaseContext */>; 8241 AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A) 8242 : Base(IRP, A) {} 8243 8244 /// See AbstractAttribute::initialize(..). 8245 void initialize(Attributor &A) override { 8246 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8247 indicatePessimisticFixpoint(); 8248 } else { 8249 Base::initialize(A); 8250 } 8251 } 8252 8253 /// See AbstractAttribute::trackStatistics() 8254 void trackStatistics() const override { 8255 STATS_DECLTRACK_ARG_ATTR(value_range) 8256 } 8257 }; 8258 8259 struct AAValueConstantRangeReturned 8260 : AAReturnedFromReturnedValues<AAValueConstantRange, 8261 AAValueConstantRangeImpl, 8262 AAValueConstantRangeImpl::StateType, 8263 /* PropagateCallBaseContext */ true> { 8264 using Base = 8265 AAReturnedFromReturnedValues<AAValueConstantRange, 8266 AAValueConstantRangeImpl, 8267 AAValueConstantRangeImpl::StateType, 8268 /* PropagateCallBaseContext */ true>; 8269 AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A) 8270 : Base(IRP, A) {} 8271 8272 /// See AbstractAttribute::initialize(...). 8273 void initialize(Attributor &A) override {} 8274 8275 /// See AbstractAttribute::trackStatistics() 8276 void trackStatistics() const override { 8277 STATS_DECLTRACK_FNRET_ATTR(value_range) 8278 } 8279 }; 8280 8281 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl { 8282 AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A) 8283 : AAValueConstantRangeImpl(IRP, A) {} 8284 8285 /// See AbstractAttribute::initialize(...).
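/// Seeds the state directly for trivial values: constants collapse to a
/// single-element range, undef collapses to 0, and loads may contribute
/// their !range metadata. Values not handled during the update are fixed
/// pessimistically right away.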
8286 void initialize(Attributor &A) override { 8287 AAValueConstantRangeImpl::initialize(A); 8288 if (isAtFixpoint()) 8289 return; 8290 8291 Value &V = getAssociatedValue(); 8292 8293 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8294 unionAssumed(ConstantRange(C->getValue())); 8295 indicateOptimisticFixpoint(); 8296 return; 8297 } 8298 8299 if (isa<UndefValue>(&V)) { 8300 // Collapse the undef state to 0. 8301 unionAssumed(ConstantRange(APInt(getBitWidth(), 0))); 8302 indicateOptimisticFixpoint(); 8303 return; 8304 } 8305 8306 if (isa<CallBase>(&V)) 8307 return; 8308 8309 if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V)) 8310 return; 8311 8312 // If it is a load instruction with range metadata, use it. 8313 if (LoadInst *LI = dyn_cast<LoadInst>(&V)) 8314 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) { 8315 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8316 return; 8317 } 8318 8319 // We can work with PHI and select instructions as we traverse their 8320 // operands during update. 8321 if (isa<SelectInst>(V) || isa<PHINode>(V)) 8322 return; 8323 8324 // Otherwise we give up. 8325 indicatePessimisticFixpoint(); 8326 8327 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: " 8328 << getAssociatedValue() << "\n"); 8329 } 8330 8331 bool calculateBinaryOperator( 8332 Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T, 8333 const Instruction *CtxI, 8334 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8335 Value *LHS = BinOp->getOperand(0); 8336 Value *RHS = BinOp->getOperand(1); 8337 8338 // Simplify the operands first. 8339 bool UsedAssumedInformation = false; 8340 const auto &SimplifiedLHS = A.getAssumedSimplified( 8341 IRPosition::value(*LHS, getCallBaseContext()), *this, 8342 UsedAssumedInformation, AA::Interprocedural); 8343 if (!SimplifiedLHS.has_value()) 8344 return true; 8345 if (!SimplifiedLHS.value()) 8346 return false; 8347 LHS = *SimplifiedLHS; 8348 8349 const auto &SimplifiedRHS = A.getAssumedSimplified( 8350 IRPosition::value(*RHS, getCallBaseContext()), *this, 8351 UsedAssumedInformation, AA::Interprocedural); 8352 if (!SimplifiedRHS.has_value()) 8353 return true; 8354 if (!SimplifiedRHS.value()) 8355 return false; 8356 RHS = *SimplifiedRHS; 8357 8358 // TODO: Allow non integers as well. 8359 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8360 return false; 8361 8362 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8363 *this, IRPosition::value(*LHS, getCallBaseContext()), 8364 DepClassTy::REQUIRED); 8365 QuerriedAAs.push_back(&LHSAA); 8366 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8367 8368 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8369 *this, IRPosition::value(*RHS, getCallBaseContext()), 8370 DepClassTy::REQUIRED); 8371 QuerriedAAs.push_back(&RHSAA); 8372 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8373 8374 auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange); 8375 8376 T.unionAssumed(AssumedRange); 8377 8378 // TODO: Track a known state too. 8379 8380 return T.isValidState(); 8381 } 8382 8383 bool calculateCastInst( 8384 Attributor &A, CastInst *CastI, IntegerRangeState &T, 8385 const Instruction *CtxI, 8386 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8387 assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!"); 8388 // TODO: Allow non integers as well. 8389 Value *OpV = CastI->getOperand(0); 8390 8391 // Simplify the operand first.
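// Querying the simplified operand rather than the IR operand lets the
// range reasoning see through values other parts of the Attributor have
// already collapsed, e.g., a select whose condition is known to be constant.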
8392 bool UsedAssumedInformation = false; 8393 const auto &SimplifiedOpV = A.getAssumedSimplified( 8394 IRPosition::value(*OpV, getCallBaseContext()), *this, 8395 UsedAssumedInformation, AA::Interprocedural); 8396 if (!SimplifiedOpV.has_value()) 8397 return true; 8398 if (!SimplifiedOpV.value()) 8399 return false; 8400 OpV = *SimplifiedOpV; 8401 8402 if (!OpV->getType()->isIntegerTy()) 8403 return false; 8404 8405 auto &OpAA = A.getAAFor<AAValueConstantRange>( 8406 *this, IRPosition::value(*OpV, getCallBaseContext()), 8407 DepClassTy::REQUIRED); 8408 QuerriedAAs.push_back(&OpAA); 8409 T.unionAssumed( 8410 OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth())); 8411 return T.isValidState(); 8412 } 8413 8414 bool 8415 calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T, 8416 const Instruction *CtxI, 8417 SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) { 8418 Value *LHS = CmpI->getOperand(0); 8419 Value *RHS = CmpI->getOperand(1); 8420 8421 // Simplify the operands first. 8422 bool UsedAssumedInformation = false; 8423 const auto &SimplifiedLHS = A.getAssumedSimplified( 8424 IRPosition::value(*LHS, getCallBaseContext()), *this, 8425 UsedAssumedInformation, AA::Interprocedural); 8426 if (!SimplifiedLHS.has_value()) 8427 return true; 8428 if (!SimplifiedLHS.value()) 8429 return false; 8430 LHS = *SimplifiedLHS; 8431 8432 const auto &SimplifiedRHS = A.getAssumedSimplified( 8433 IRPosition::value(*RHS, getCallBaseContext()), *this, 8434 UsedAssumedInformation, AA::Interprocedural); 8435 if (!SimplifiedRHS.has_value()) 8436 return true; 8437 if (!SimplifiedRHS.value()) 8438 return false; 8439 RHS = *SimplifiedRHS; 8440 8441 // TODO: Allow non integers as well. 8442 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) 8443 return false; 8444 8445 auto &LHSAA = A.getAAFor<AAValueConstantRange>( 8446 *this, IRPosition::value(*LHS, getCallBaseContext()), 8447 DepClassTy::REQUIRED); 8448 QuerriedAAs.push_back(&LHSAA); 8449 auto &RHSAA = A.getAAFor<AAValueConstantRange>( 8450 *this, IRPosition::value(*RHS, getCallBaseContext()), 8451 DepClassTy::REQUIRED); 8452 QuerriedAAs.push_back(&RHSAA); 8453 auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI); 8454 auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI); 8455 8456 // If one of them is an empty set, we can't decide. 8457 if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet()) 8458 return true; 8459 8460 bool MustTrue = false, MustFalse = false; 8461 8462 auto AllowedRegion = 8463 ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange); 8464 8465 if (AllowedRegion.intersectWith(LHSAARange).isEmptySet()) 8466 MustFalse = true; 8467 8468 if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange)) 8469 MustTrue = true; 8470 8471 assert((!MustTrue || !MustFalse) && 8472 "Either MustTrue or MustFalse should be false!"); 8473 8474 if (MustTrue) 8475 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1))); 8476 else if (MustFalse) 8477 T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0))); 8478 else 8479 T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true)); 8480 8481 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA 8482 << " " << RHSAA << "\n"); 8483 8484 // TODO: Track a known state too. 8485 return T.isValidState(); 8486 } 8487 8488 /// See AbstractAttribute::updateImpl(...).
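/// The new state is recomputed from scratch into T: binary operators,
/// compares, and casts combine the ranges of their (simplified) operands,
/// while everything else is answered by the AAValueConstantRange of the
/// value itself. E.g., 'icmp ult %x, %y' with %x in [0, 4) and %y in
/// [8, 16) holds for all pairs, so the 1-bit state collapses to the
/// constant true range.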
8489 ChangeStatus updateImpl(Attributor &A) override { 8490 8491 IntegerRangeState T(getBitWidth()); 8492 auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { 8493 Instruction *I = dyn_cast<Instruction>(&V); 8494 if (!I || isa<CallBase>(I)) { 8495 8496 // Simplify the operand first. 8497 bool UsedAssumedInformation = false; 8498 const auto &SimplifiedOpV = A.getAssumedSimplified( 8499 IRPosition::value(V, getCallBaseContext()), *this, 8500 UsedAssumedInformation, AA::Interprocedural); 8501 if (!SimplifiedOpV.has_value()) 8502 return true; 8503 if (!SimplifiedOpV.value()) 8504 return false; 8505 Value *VPtr = *SimplifiedOpV; 8506 8507 // If the value is not an instruction, we query the Attributor for its AA. 8508 const auto &AA = A.getAAFor<AAValueConstantRange>( 8509 *this, IRPosition::value(*VPtr, getCallBaseContext()), 8510 DepClassTy::REQUIRED); 8511 8512 // We do not clamp here so that the program point CtxI can be utilized. 8513 T.unionAssumed(AA.getAssumedConstantRange(A, CtxI)); 8514 8515 return T.isValidState(); 8516 } 8517 8518 SmallVector<const AAValueConstantRange *, 4> QuerriedAAs; 8519 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) { 8520 if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs)) 8521 return false; 8522 } else if (auto *CmpI = dyn_cast<CmpInst>(I)) { 8523 if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs)) 8524 return false; 8525 } else if (auto *CastI = dyn_cast<CastInst>(I)) { 8526 if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs)) 8527 return false; 8528 } else { 8529 // Give up with other instructions. 8530 // TODO: Add other instructions 8531 8532 T.indicatePessimisticFixpoint(); 8533 return false; 8534 } 8535 8536 // Catch circular reasoning in a pessimistic way for now. 8537 // TODO: Check how the range evolves and if we stripped anything, see also 8538 // AADereferenceable or AAAlign for similar situations. 8539 for (const AAValueConstantRange *QueriedAA : QuerriedAAs) { 8540 if (QueriedAA != this) 8541 continue; 8542 // If we are in a steady state we do not need to worry. 8543 if (T.getAssumed() == getState().getAssumed()) 8544 continue; 8545 T.indicatePessimisticFixpoint(); 8546 } 8547 8548 return T.isValidState(); 8549 }; 8550 8551 if (!VisitValueCB(getAssociatedValue(), getCtxI())) 8552 return indicatePessimisticFixpoint(); 8553 8554 // Ensure that long def-use chains can't cause circular reasoning either by 8555 // introducing a cutoff below. 8556 if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED) 8557 return ChangeStatus::UNCHANGED; 8558 if (++NumChanges > MaxNumChanges) { 8559 LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges 8560 << " changes but only " << MaxNumChanges 8561 << " are allowed to avoid cyclic reasoning.\n"); 8562 return indicatePessimisticFixpoint(); 8563 } 8564 return ChangeStatus::CHANGED; 8565 } 8566 8567 /// See AbstractAttribute::trackStatistics() 8568 void trackStatistics() const override { 8569 STATS_DECLTRACK_FLOATING_ATTR(value_range) 8570 } 8571 8572 /// Tracker to bail after too many widening steps of the constant range. 8573 int NumChanges = 0; 8574 8575 /// Upper bound for the number of allowed changes (=widening steps) for the 8576 /// constant range before we give up. 8577 static constexpr int MaxNumChanges = 5; 8578 }; 8579 8580 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl { 8581 AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A) 8582 : AAValueConstantRangeImpl(IRP, A) {} 8583 8584 /// See AbstractAttribute::updateImpl(...).
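/// Function and call-site positions are never updated themselves; they only
/// anchor range queries made through other positions.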
8585 ChangeStatus updateImpl(Attributor &A) override { 8586 llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will " 8587 "not be called"); 8588 } 8589 8590 /// See AbstractAttribute::trackStatistics() 8591 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) } 8592 }; 8593 8594 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction { 8595 AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A) 8596 : AAValueConstantRangeFunction(IRP, A) {} 8597 8598 /// See AbstractAttribute::trackStatistics() 8599 void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) } 8600 }; 8601 8602 struct AAValueConstantRangeCallSiteReturned 8603 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8604 AAValueConstantRangeImpl, 8605 AAValueConstantRangeImpl::StateType, 8606 /* IntroduceCallBaseContext */ true> { 8607 AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A) 8608 : AACallSiteReturnedFromReturned<AAValueConstantRange, 8609 AAValueConstantRangeImpl, 8610 AAValueConstantRangeImpl::StateType, 8611 /* IntroduceCallBaseContext */ true>(IRP, 8612 A) { 8613 } 8614 8615 /// See AbstractAttribute::initialize(...). 8616 void initialize(Attributor &A) override { 8617 // If it is a load instruction with range metadata, use the metadata. 8618 if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue())) 8619 if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range)) 8620 intersectKnown(getConstantRangeFromMetadata(*RangeMD)); 8621 8622 AAValueConstantRangeImpl::initialize(A); 8623 } 8624 8625 /// See AbstractAttribute::trackStatistics() 8626 void trackStatistics() const override { 8627 STATS_DECLTRACK_CSRET_ATTR(value_range) 8628 } 8629 }; 8630 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating { 8631 AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A) 8632 : AAValueConstantRangeFloating(IRP, A) {} 8633 8634 /// See AbstractAttribute::manifest() 8635 ChangeStatus manifest(Attributor &A) override { 8636 return ChangeStatus::UNCHANGED; 8637 } 8638 8639 /// See AbstractAttribute::trackStatistics() 8640 void trackStatistics() const override { 8641 STATS_DECLTRACK_CSARG_ATTR(value_range) 8642 } 8643 }; 8644 } // namespace 8645 8646 /// ------------------ Potential Values Attribute ------------------------- 8647 8648 namespace { 8649 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues { 8650 using StateType = PotentialConstantIntValuesState; 8651 8652 AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A) 8653 : AAPotentialConstantValues(IRP, A) {} 8654 8655 /// See AbstractAttribute::initialize(..). 
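/// Positions with a registered simplification callback are fixed
/// pessimistically here so the callback remains the single source of truth.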
8656 void initialize(Attributor &A) override { 8657 if (A.hasSimplificationCallback(getIRPosition())) 8658 indicatePessimisticFixpoint(); 8659 else 8660 AAPotentialConstantValues::initialize(A); 8661 } 8662 8663 bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S, 8664 bool &ContainsUndef) { 8665 SmallVector<AA::ValueAndContext> Values; 8666 bool UsedAssumedInformation = false; 8667 if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural, 8668 UsedAssumedInformation)) { 8669 if (!IRP.getAssociatedType()->isIntegerTy()) 8670 return false; 8671 auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>( 8672 *this, IRP, DepClassTy::REQUIRED); 8673 if (!PotentialValuesAA.getState().isValidState()) 8674 return false; 8675 ContainsUndef = PotentialValuesAA.getState().undefIsContained(); 8676 S = PotentialValuesAA.getState().getAssumedSet(); 8677 return true; 8678 } 8679 8680 for (auto &It : Values) { 8681 if (isa<UndefValue>(It.getValue())) 8682 continue; 8683 auto *CI = dyn_cast<ConstantInt>(It.getValue()); 8684 if (!CI) 8685 return false; 8686 S.insert(CI->getValue()); 8687 } 8688 ContainsUndef = S.empty(); 8689 8690 return true; 8691 } 8692 8693 /// See AbstractAttribute::getAsStr(). 8694 const std::string getAsStr() const override { 8695 std::string Str; 8696 llvm::raw_string_ostream OS(Str); 8697 OS << getState(); 8698 return OS.str(); 8699 } 8700 8701 /// See AbstractAttribute::updateImpl(...). 8702 ChangeStatus updateImpl(Attributor &A) override { 8703 return indicatePessimisticFixpoint(); 8704 } 8705 }; 8706 8707 struct AAPotentialConstantValuesArgument final 8708 : AAArgumentFromCallSiteArguments<AAPotentialConstantValues, 8709 AAPotentialConstantValuesImpl, 8710 PotentialConstantIntValuesState> { 8711 using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues, 8712 AAPotentialConstantValuesImpl, 8713 PotentialConstantIntValuesState>; 8714 AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A) 8715 : Base(IRP, A) {} 8716 8717 /// See AbstractAttribute::initialize(..). 8718 void initialize(Attributor &A) override { 8719 if (!getAnchorScope() || getAnchorScope()->isDeclaration()) { 8720 indicatePessimisticFixpoint(); 8721 } else { 8722 Base::initialize(A); 8723 } 8724 } 8725 8726 /// See AbstractAttribute::trackStatistics() 8727 void trackStatistics() const override { 8728 STATS_DECLTRACK_ARG_ATTR(potential_values) 8729 } 8730 }; 8731 8732 struct AAPotentialConstantValuesReturned 8733 : AAReturnedFromReturnedValues<AAPotentialConstantValues, 8734 AAPotentialConstantValuesImpl> { 8735 using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues, 8736 AAPotentialConstantValuesImpl>; 8737 AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A) 8738 : Base(IRP, A) {} 8739 8740 /// See AbstractAttribute::trackStatistics() 8741 void trackStatistics() const override { 8742 STATS_DECLTRACK_FNRET_ATTR(potential_values) 8743 } 8744 }; 8745 8746 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl { 8747 AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A) 8748 : AAPotentialConstantValuesImpl(IRP, A) {} 8749 8750 /// See AbstractAttribute::initialize(..). 
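/// Seeds the set for trivial values: a ConstantInt becomes a singleton set
/// and undef is tracked through the undef flag. Instructions the update can
/// handle (binary operators, icmps, casts, selects, PHIs, loads) are left
/// open; everything else is fixed pessimistically.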
8751 void initialize(Attributor &A) override { 8752 AAPotentialConstantValuesImpl::initialize(A); 8753 if (isAtFixpoint()) 8754 return; 8755 8756 Value &V = getAssociatedValue(); 8757 8758 if (auto *C = dyn_cast<ConstantInt>(&V)) { 8759 unionAssumed(C->getValue()); 8760 indicateOptimisticFixpoint(); 8761 return; 8762 } 8763 8764 if (isa<UndefValue>(&V)) { 8765 unionAssumedWithUndef(); 8766 indicateOptimisticFixpoint(); 8767 return; 8768 } 8769 8770 if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V)) 8771 return; 8772 8773 if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V)) 8774 return; 8775 8776 indicatePessimisticFixpoint(); 8777 8778 LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: " 8779 << getAssociatedValue() << "\n"); 8780 } 8781 8782 static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS, 8783 const APInt &RHS) { 8784 return ICmpInst::compare(LHS, RHS, ICI->getPredicate()); 8785 } 8786 8787 static APInt calculateCastInst(const CastInst *CI, const APInt &Src, 8788 uint32_t ResultBitWidth) { 8789 Instruction::CastOps CastOp = CI->getOpcode(); 8790 switch (CastOp) { 8791 default: 8792 llvm_unreachable("unsupported or not integer cast"); 8793 case Instruction::Trunc: 8794 return Src.trunc(ResultBitWidth); 8795 case Instruction::SExt: 8796 return Src.sext(ResultBitWidth); 8797 case Instruction::ZExt: 8798 return Src.zext(ResultBitWidth); 8799 case Instruction::BitCast: 8800 return Src; 8801 } 8802 } 8803 8804 static APInt calculateBinaryOperator(const BinaryOperator *BinOp, 8805 const APInt &LHS, const APInt &RHS, 8806 bool &SkipOperation, bool &Unsupported) { 8807 Instruction::BinaryOps BinOpcode = BinOp->getOpcode(); 8808 // Unsupported is set to true when the binary operator is not supported. 8809 // SkipOperation is set to true when UB occurs with the given operand pair 8810 // (LHS, RHS). 8811 // TODO: we should look at nsw and nuw keywords to handle operations 8812 // that create poison or undef values. 8813 switch (BinOpcode) { 8814 default: 8815 Unsupported = true; 8816 return LHS; 8817 case Instruction::Add: 8818 return LHS + RHS; 8819 case Instruction::Sub: 8820 return LHS - RHS; 8821 case Instruction::Mul: 8822 return LHS * RHS; 8823 case Instruction::UDiv: 8824 if (RHS.isZero()) { 8825 SkipOperation = true; 8826 return LHS; 8827 } 8828 return LHS.udiv(RHS); 8829 case Instruction::SDiv: 8830 if (RHS.isZero()) { 8831 SkipOperation = true; 8832 return LHS; 8833 } 8834 return LHS.sdiv(RHS); 8835 case Instruction::URem: 8836 if (RHS.isZero()) { 8837 SkipOperation = true; 8838 return LHS; 8839 } 8840 return LHS.urem(RHS); 8841 case Instruction::SRem: 8842 if (RHS.isZero()) { 8843 SkipOperation = true; 8844 return LHS; 8845 } 8846 return LHS.srem(RHS); 8847 case Instruction::Shl: 8848 return LHS.shl(RHS); 8849 case Instruction::LShr: 8850 return LHS.lshr(RHS); 8851 case Instruction::AShr: 8852 return LHS.ashr(RHS); 8853 case Instruction::And: 8854 return LHS & RHS; 8855 case Instruction::Or: 8856 return LHS | RHS; 8857 case Instruction::Xor: 8858 return LHS ^ RHS; 8859 } 8860 } 8861 8862 bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp, 8863 const APInt &LHS, const APInt &RHS) { 8864 bool SkipOperation = false; 8865 bool Unsupported = false; 8866 APInt Result = 8867 calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported); 8868 if (Unsupported) 8869 return false; 8870 // If SkipOperation is true, we can ignore this operand pair (L, R).
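// E.g., for a udiv where 0 is among the divisor's potential values, the
// pair (L, 0) is skipped rather than invalidating the whole set; pairs with
// a non-zero divisor still contribute L.udiv(R).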
8871 if (!SkipOperation) 8872 unionAssumed(Result); 8873 return isValidState(); 8874 } 8875 8876 ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) { 8877 auto AssumedBefore = getAssumed(); 8878 Value *LHS = ICI->getOperand(0); 8879 Value *RHS = ICI->getOperand(1); 8880 8881 bool LHSContainsUndef = false, RHSContainsUndef = false; 8882 SetTy LHSAAPVS, RHSAAPVS; 8883 if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS, 8884 LHSContainsUndef) || 8885 !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS, 8886 RHSContainsUndef)) 8887 return indicatePessimisticFixpoint(); 8888 8889 // TODO: make use of undef flag to limit potential values aggressively. 8890 bool MaybeTrue = false, MaybeFalse = false; 8891 const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0); 8892 if (LHSContainsUndef && RHSContainsUndef) { 8893 // The result of any comparison between undefs can be soundly replaced 8894 // with undef. 8895 unionAssumedWithUndef(); 8896 } else if (LHSContainsUndef) { 8897 for (const APInt &R : RHSAAPVS) { 8898 bool CmpResult = calculateICmpInst(ICI, Zero, R); 8899 MaybeTrue |= CmpResult; 8900 MaybeFalse |= !CmpResult; 8901 if (MaybeTrue & MaybeFalse) 8902 return indicatePessimisticFixpoint(); 8903 } 8904 } else if (RHSContainsUndef) { 8905 for (const APInt &L : LHSAAPVS) { 8906 bool CmpResult = calculateICmpInst(ICI, L, Zero); 8907 MaybeTrue |= CmpResult; 8908 MaybeFalse |= !CmpResult; 8909 if (MaybeTrue & MaybeFalse) 8910 return indicatePessimisticFixpoint(); 8911 } 8912 } else { 8913 for (const APInt &L : LHSAAPVS) { 8914 for (const APInt &R : RHSAAPVS) { 8915 bool CmpResult = calculateICmpInst(ICI, L, R); 8916 MaybeTrue |= CmpResult; 8917 MaybeFalse |= !CmpResult; 8918 if (MaybeTrue & MaybeFalse) 8919 return indicatePessimisticFixpoint(); 8920 } 8921 } 8922 } 8923 if (MaybeTrue) 8924 unionAssumed(APInt(/* numBits */ 1, /* val */ 1)); 8925 if (MaybeFalse) 8926 unionAssumed(APInt(/* numBits */ 1, /* val */ 0)); 8927 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8928 : ChangeStatus::CHANGED; 8929 } 8930 8931 ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) { 8932 auto AssumedBefore = getAssumed(); 8933 Value *LHS = SI->getTrueValue(); 8934 Value *RHS = SI->getFalseValue(); 8935 8936 bool UsedAssumedInformation = false; 8937 Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this, 8938 UsedAssumedInformation); 8939 8940 // Check if we only need one operand. 8941 bool OnlyLeft = false, OnlyRight = false; 8942 if (C && *C && (*C)->isOneValue()) 8943 OnlyLeft = true; 8944 else if (C && *C && (*C)->isZeroValue()) 8945 OnlyRight = true; 8946 8947 bool LHSContainsUndef = false, RHSContainsUndef = false; 8948 SetTy LHSAAPVS, RHSAAPVS; 8949 if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS), 8950 LHSAAPVS, LHSContainsUndef)) 8951 return indicatePessimisticFixpoint(); 8952 8953 if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS), 8954 RHSAAPVS, RHSContainsUndef)) 8955 return indicatePessimisticFixpoint(); 8956 8957 if (OnlyLeft || OnlyRight) { 8958 // select (true/false), lhs, rhs 8959 auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS; 8960 auto Undef = OnlyLeft ? 
LHSContainsUndef : RHSContainsUndef; 8961 8962 if (Undef) 8963 unionAssumedWithUndef(); 8964 else { 8965 for (auto &It : *OpAA) 8966 unionAssumed(It); 8967 } 8968 8969 } else if (LHSContainsUndef && RHSContainsUndef) { 8970 // select i1 *, undef , undef => undef 8971 unionAssumedWithUndef(); 8972 } else { 8973 for (auto &It : LHSAAPVS) 8974 unionAssumed(It); 8975 for (auto &It : RHSAAPVS) 8976 unionAssumed(It); 8977 } 8978 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 8979 : ChangeStatus::CHANGED; 8980 } 8981 8982 ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) { 8983 auto AssumedBefore = getAssumed(); 8984 if (!CI->isIntegerCast()) 8985 return indicatePessimisticFixpoint(); 8986 assert(CI->getNumOperands() == 1 && "Expected cast to be unary!"); 8987 uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth(); 8988 Value *Src = CI->getOperand(0); 8989 8990 bool SrcContainsUndef = false; 8991 SetTy SrcPVS; 8992 if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS, 8993 SrcContainsUndef)) 8994 return indicatePessimisticFixpoint(); 8995 8996 if (SrcContainsUndef) 8997 unionAssumedWithUndef(); 8998 else { 8999 for (const APInt &S : SrcPVS) { 9000 APInt T = calculateCastInst(CI, S, ResultBitWidth); 9001 unionAssumed(T); 9002 } 9003 } 9004 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9005 : ChangeStatus::CHANGED; 9006 } 9007 9008 ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) { 9009 auto AssumedBefore = getAssumed(); 9010 Value *LHS = BinOp->getOperand(0); 9011 Value *RHS = BinOp->getOperand(1); 9012 9013 bool LHSContainsUndef = false, RHSContainsUndef = false; 9014 SetTy LHSAAPVS, RHSAAPVS; 9015 if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS, 9016 LHSContainsUndef) || 9017 !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS, 9018 RHSContainsUndef)) 9019 return indicatePessimisticFixpoint(); 9020 9021 const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0); 9022 9023 // TODO: make use of undef flag to limit potential values aggressively. 9024 if (LHSContainsUndef && RHSContainsUndef) { 9025 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero)) 9026 return indicatePessimisticFixpoint(); 9027 } else if (LHSContainsUndef) { 9028 for (const APInt &R : RHSAAPVS) { 9029 if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R)) 9030 return indicatePessimisticFixpoint(); 9031 } 9032 } else if (RHSContainsUndef) { 9033 for (const APInt &L : LHSAAPVS) { 9034 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero)) 9035 return indicatePessimisticFixpoint(); 9036 } 9037 } else { 9038 for (const APInt &L : LHSAAPVS) { 9039 for (const APInt &R : RHSAAPVS) { 9040 if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R)) 9041 return indicatePessimisticFixpoint(); 9042 } 9043 } 9044 } 9045 return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED 9046 : ChangeStatus::CHANGED; 9047 } 9048 9049 /// See AbstractAttribute::updateImpl(...). 
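/// Dispatches to the matching handler above; any other instruction
/// collapses the set to the pessimistic fixpoint.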
9050 ChangeStatus updateImpl(Attributor &A) override { 9051 Value &V = getAssociatedValue(); 9052 Instruction *I = dyn_cast<Instruction>(&V); 9053 9054 if (auto *ICI = dyn_cast<ICmpInst>(I)) 9055 return updateWithICmpInst(A, ICI); 9056 9057 if (auto *SI = dyn_cast<SelectInst>(I)) 9058 return updateWithSelectInst(A, SI); 9059 9060 if (auto *CI = dyn_cast<CastInst>(I)) 9061 return updateWithCastInst(A, CI); 9062 9063 if (auto *BinOp = dyn_cast<BinaryOperator>(I)) 9064 return updateWithBinaryOperator(A, BinOp); 9065 9066 return indicatePessimisticFixpoint(); 9067 } 9068 9069 /// See AbstractAttribute::trackStatistics() 9070 void trackStatistics() const override { 9071 STATS_DECLTRACK_FLOATING_ATTR(potential_values) 9072 } 9073 }; 9074 9075 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl { 9076 AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A) 9077 : AAPotentialConstantValuesImpl(IRP, A) {} 9078 9079 /// See AbstractAttribute::updateImpl(...). 9080 ChangeStatus updateImpl(Attributor &A) override { 9081 llvm_unreachable( 9082 "AAPotentialConstantValues(Function|CallSite)::updateImpl will " 9083 "not be called"); 9084 } 9085 9086 /// See AbstractAttribute::trackStatistics() 9087 void trackStatistics() const override { 9088 STATS_DECLTRACK_FN_ATTR(potential_values) 9089 } 9090 }; 9091 9092 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction { 9093 AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A) 9094 : AAPotentialConstantValuesFunction(IRP, A) {} 9095 9096 /// See AbstractAttribute::trackStatistics() 9097 void trackStatistics() const override { 9098 STATS_DECLTRACK_CS_ATTR(potential_values) 9099 } 9100 }; 9101 9102 struct AAPotentialConstantValuesCallSiteReturned 9103 : AACallSiteReturnedFromReturned<AAPotentialConstantValues, 9104 AAPotentialConstantValuesImpl> { 9105 AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP, 9106 Attributor &A) 9107 : AACallSiteReturnedFromReturned<AAPotentialConstantValues, 9108 AAPotentialConstantValuesImpl>(IRP, A) {} 9109 9110 /// See AbstractAttribute::trackStatistics() 9111 void trackStatistics() const override { 9112 STATS_DECLTRACK_CSRET_ATTR(potential_values) 9113 } 9114 }; 9115 9116 struct AAPotentialConstantValuesCallSiteArgument 9117 : AAPotentialConstantValuesFloating { 9118 AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP, 9119 Attributor &A) 9120 : AAPotentialConstantValuesFloating(IRP, A) {} 9121 9122 /// See AbstractAttribute::initialize(..). 9123 void initialize(Attributor &A) override { 9124 AAPotentialConstantValuesImpl::initialize(A); 9125 if (isAtFixpoint()) 9126 return; 9127 9128 Value &V = getAssociatedValue(); 9129 9130 if (auto *C = dyn_cast<ConstantInt>(&V)) { 9131 unionAssumed(C->getValue()); 9132 indicateOptimisticFixpoint(); 9133 return; 9134 } 9135 9136 if (isa<UndefValue>(&V)) { 9137 unionAssumedWithUndef(); 9138 indicateOptimisticFixpoint(); 9139 return; 9140 } 9141 } 9142 9143 /// See AbstractAttribute::updateImpl(...). 9144 ChangeStatus updateImpl(Attributor &A) override { 9145 Value &V = getAssociatedValue(); 9146 auto AssumedBefore = getAssumed(); 9147 auto &AA = A.getAAFor<AAPotentialConstantValues>( 9148 *this, IRPosition::value(V), DepClassTy::REQUIRED); 9149 const auto &S = AA.getAssumed(); 9150 unionAssumed(S); 9151 return AssumedBefore == getAssumed() ?
ChangeStatus::UNCHANGED 9152 : ChangeStatus::CHANGED; 9153 } 9154 9155 /// See AbstractAttribute::trackStatistics() 9156 void trackStatistics() const override { 9157 STATS_DECLTRACK_CSARG_ATTR(potential_values) 9158 } 9159 }; 9160 9161 /// ------------------------ NoUndef Attribute --------------------------------- 9162 struct AANoUndefImpl : AANoUndef { 9163 AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {} 9164 9165 /// See AbstractAttribute::initialize(...). 9166 void initialize(Attributor &A) override { 9167 if (getIRPosition().hasAttr({Attribute::NoUndef})) { 9168 indicateOptimisticFixpoint(); 9169 return; 9170 } 9171 Value &V = getAssociatedValue(); 9172 if (isa<UndefValue>(V)) 9173 indicatePessimisticFixpoint(); 9174 else if (isa<FreezeInst>(V)) 9175 indicateOptimisticFixpoint(); 9176 else if (getPositionKind() != IRPosition::IRP_RETURNED && 9177 isGuaranteedNotToBeUndefOrPoison(&V)) 9178 indicateOptimisticFixpoint(); 9179 else 9180 AANoUndef::initialize(A); 9181 } 9182 9183 /// See followUsesInMBEC 9184 bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I, 9185 AANoUndef::StateType &State) { 9186 const Value *UseV = U->get(); 9187 const DominatorTree *DT = nullptr; 9188 AssumptionCache *AC = nullptr; 9189 InformationCache &InfoCache = A.getInfoCache(); 9190 if (Function *F = getAnchorScope()) { 9191 DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 9192 AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 9193 } 9194 State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT)); 9195 bool TrackUse = false; 9196 // Track use for instructions which must produce undef or poison bits when 9197 // at least one operand contains such bits. 9198 if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I)) 9199 TrackUse = true; 9200 return TrackUse; 9201 } 9202 9203 /// See AbstractAttribute::getAsStr(). 9204 const std::string getAsStr() const override { 9205 return getAssumed() ? "noundef" : "may-undef-or-poison"; 9206 } 9207 9208 ChangeStatus manifest(Attributor &A) override { 9209 // We don't manifest noundef attribute for dead positions because the 9210 // associated values with dead positions would be replaced with undef 9211 // values. 9212 bool UsedAssumedInformation = false; 9213 if (A.isAssumedDead(getIRPosition(), nullptr, nullptr, 9214 UsedAssumedInformation)) 9215 return ChangeStatus::UNCHANGED; 9216 // A position whose simplified value does not have any value is 9217 // considered to be dead. We don't manifest noundef in such positions for 9218 // the same reason above. 9219 if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation, 9220 AA::Interprocedural) 9221 .has_value()) 9222 return ChangeStatus::UNCHANGED; 9223 return AANoUndef::manifest(A); 9224 } 9225 }; 9226 9227 struct AANoUndefFloating : public AANoUndefImpl { 9228 AANoUndefFloating(const IRPosition &IRP, Attributor &A) 9229 : AANoUndefImpl(IRP, A) {} 9230 9231 /// See AbstractAttribute::initialize(...). 9232 void initialize(Attributor &A) override { 9233 AANoUndefImpl::initialize(A); 9234 if (!getState().isAtFixpoint()) 9235 if (Instruction *CtxI = getCtxI()) 9236 followUsesInMBEC(*this, A, getState(), *CtxI); 9237 } 9238 9239 /// See AbstractAttribute::updateImpl(...). 
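/// noundef holds for this position only if it holds for every value the
/// position may simplify to; encountering ourselves during the walk is
/// treated as a pessimistic fixpoint to avoid circular reasoning.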
9240 ChangeStatus updateImpl(Attributor &A) override { 9241 9242 SmallVector<AA::ValueAndContext> Values; 9243 bool UsedAssumedInformation = false; 9244 if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values, 9245 AA::AnyScope, UsedAssumedInformation)) { 9246 Values.push_back({getAssociatedValue(), getCtxI()}); 9247 } 9248 9249 StateType T; 9250 auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool { 9251 const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V), 9252 DepClassTy::REQUIRED); 9253 if (this == &AA) { 9254 T.indicatePessimisticFixpoint(); 9255 } else { 9256 const AANoUndef::StateType &S = 9257 static_cast<const AANoUndef::StateType &>(AA.getState()); 9258 T ^= S; 9259 } 9260 return T.isValidState(); 9261 }; 9262 9263 for (const auto &VAC : Values) 9264 if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI())) 9265 return indicatePessimisticFixpoint(); 9266 9267 return clampStateAndIndicateChange(getState(), T); 9268 } 9269 9270 /// See AbstractAttribute::trackStatistics() 9271 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9272 }; 9273 9274 struct AANoUndefReturned final 9275 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> { 9276 AANoUndefReturned(const IRPosition &IRP, Attributor &A) 9277 : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {} 9278 9279 /// See AbstractAttribute::trackStatistics() 9280 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) } 9281 }; 9282 9283 struct AANoUndefArgument final 9284 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> { 9285 AANoUndefArgument(const IRPosition &IRP, Attributor &A) 9286 : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {} 9287 9288 /// See AbstractAttribute::trackStatistics() 9289 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) } 9290 }; 9291 9292 struct AANoUndefCallSiteArgument final : AANoUndefFloating { 9293 AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A) 9294 : AANoUndefFloating(IRP, A) {} 9295 9296 /// See AbstractAttribute::trackStatistics() 9297 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) } 9298 }; 9299 9300 struct AANoUndefCallSiteReturned final 9301 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> { 9302 AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A) 9303 : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {} 9304 9305 /// See AbstractAttribute::trackStatistics() 9306 void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) } 9307 }; 9308 9309 struct AACallEdgesImpl : public AACallEdges { 9310 AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {} 9311 9312 virtual const SetVector<Function *> &getOptimisticEdges() const override { 9313 return CalledFunctions; 9314 } 9315 9316 virtual bool hasUnknownCallee() const override { return HasUnknownCallee; } 9317 9318 virtual bool hasNonAsmUnknownCallee() const override { 9319 return HasUnknownCalleeNonAsm; 9320 } 9321 9322 const std::string getAsStr() const override { 9323 return "CallEdges[" + std::to_string(HasUnknownCallee) + "," + 9324 std::to_string(CalledFunctions.size()) + "]"; 9325 } 9326 9327 void trackStatistics() const override {} 9328 9329 protected: 9330 void addCalledFunction(Function *Fn, ChangeStatus &Change) { 9331 if (CalledFunctions.insert(Fn)) { 9332 Change = ChangeStatus::CHANGED; 9333 LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName() 9334 << 
"\n"); 9335 } 9336 } 9337 9338 void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) { 9339 if (!HasUnknownCallee) 9340 Change = ChangeStatus::CHANGED; 9341 if (NonAsm && !HasUnknownCalleeNonAsm) 9342 Change = ChangeStatus::CHANGED; 9343 HasUnknownCalleeNonAsm |= NonAsm; 9344 HasUnknownCallee = true; 9345 } 9346 9347 private: 9348 /// Optimistic set of functions that might be called by this position. 9349 SetVector<Function *> CalledFunctions; 9350 9351 /// Is there any call with a unknown callee. 9352 bool HasUnknownCallee = false; 9353 9354 /// Is there any call with a unknown callee, excluding any inline asm. 9355 bool HasUnknownCalleeNonAsm = false; 9356 }; 9357 9358 struct AACallEdgesCallSite : public AACallEdgesImpl { 9359 AACallEdgesCallSite(const IRPosition &IRP, Attributor &A) 9360 : AACallEdgesImpl(IRP, A) {} 9361 /// See AbstractAttribute::updateImpl(...). 9362 ChangeStatus updateImpl(Attributor &A) override { 9363 ChangeStatus Change = ChangeStatus::UNCHANGED; 9364 9365 auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool { 9366 if (Function *Fn = dyn_cast<Function>(&V)) { 9367 addCalledFunction(Fn, Change); 9368 } else { 9369 LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n"); 9370 setHasUnknownCallee(true, Change); 9371 } 9372 9373 // Explore all values. 9374 return true; 9375 }; 9376 9377 SmallVector<AA::ValueAndContext> Values; 9378 // Process any value that we might call. 9379 auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) { 9380 bool UsedAssumedInformation = false; 9381 Values.clear(); 9382 if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values, 9383 AA::AnyScope, UsedAssumedInformation)) { 9384 Values.push_back({*V, CtxI}); 9385 } 9386 for (auto &VAC : Values) 9387 VisitValue(*VAC.getValue(), VAC.getCtxI()); 9388 }; 9389 9390 CallBase *CB = cast<CallBase>(getCtxI()); 9391 9392 if (CB->isInlineAsm()) { 9393 if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") && 9394 !hasAssumption(*CB, "ompx_no_call_asm")) 9395 setHasUnknownCallee(false, Change); 9396 return Change; 9397 } 9398 9399 // Process callee metadata if available. 9400 if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) { 9401 for (auto &Op : MD->operands()) { 9402 Function *Callee = mdconst::dyn_extract_or_null<Function>(Op); 9403 if (Callee) 9404 addCalledFunction(Callee, Change); 9405 } 9406 return Change; 9407 } 9408 9409 // The most simple case. 9410 ProcessCalledOperand(CB->getCalledOperand(), CB); 9411 9412 // Process callback functions. 9413 SmallVector<const Use *, 4u> CallbackUses; 9414 AbstractCallSite::getCallbackUses(*CB, CallbackUses); 9415 for (const Use *U : CallbackUses) 9416 ProcessCalledOperand(U->get(), CB); 9417 9418 return Change; 9419 } 9420 }; 9421 9422 struct AACallEdgesFunction : public AACallEdgesImpl { 9423 AACallEdgesFunction(const IRPosition &IRP, Attributor &A) 9424 : AACallEdgesImpl(IRP, A) {} 9425 9426 /// See AbstractAttribute::updateImpl(...). 
9427 ChangeStatus updateImpl(Attributor &A) override { 9428 ChangeStatus Change = ChangeStatus::UNCHANGED; 9429 9430 auto ProcessCallInst = [&](Instruction &Inst) { 9431 CallBase &CB = cast<CallBase>(Inst); 9432 9433 auto &CBEdges = A.getAAFor<AACallEdges>( 9434 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 9435 if (CBEdges.hasNonAsmUnknownCallee()) 9436 setHasUnknownCallee(true, Change); 9437 if (CBEdges.hasUnknownCallee()) 9438 setHasUnknownCallee(false, Change); 9439 9440 for (Function *F : CBEdges.getOptimisticEdges()) 9441 addCalledFunction(F, Change); 9442 9443 return true; 9444 }; 9445 9446 // Visit all callable instructions. 9447 bool UsedAssumedInformation = false; 9448 if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this, 9449 UsedAssumedInformation, 9450 /* CheckBBLivenessOnly */ true)) { 9451 // If we haven't looked at all call like instructions, assume that there 9452 // are unknown callees. 9453 setHasUnknownCallee(true, Change); 9454 } 9455 9456 return Change; 9457 } 9458 }; 9459 9460 struct AAFunctionReachabilityFunction : public AAFunctionReachability { 9461 private: 9462 struct QuerySet { 9463 void markReachable(const Function &Fn) { 9464 Reachable.insert(&Fn); 9465 Unreachable.erase(&Fn); 9466 } 9467 9468 /// If there is no information about the function, None is returned. 9469 Optional<bool> isCachedReachable(const Function &Fn) { 9470 // Assume that we can reach the function. 9471 // TODO: Be more specific with the unknown callee. 9472 if (CanReachUnknownCallee) 9473 return true; 9474 9475 if (Reachable.count(&Fn)) 9476 return true; 9477 9478 if (Unreachable.count(&Fn)) 9479 return false; 9480 9481 return llvm::None; 9482 } 9483 9484 /// Set of functions that we know for sure are reachable. 9485 DenseSet<const Function *> Reachable; 9486 9487 /// Set of functions that are unreachable, but might become reachable. 9488 DenseSet<const Function *> Unreachable; 9489 9490 /// If we can reach a function with a call to an unknown function we assume 9491 /// that we can reach any function. 9492 bool CanReachUnknownCallee = false; 9493 }; 9494 9495 struct QueryResolver : public QuerySet { 9496 ChangeStatus update(Attributor &A, const AAFunctionReachability &AA, 9497 ArrayRef<const AACallEdges *> AAEdgesList) { 9498 ChangeStatus Change = ChangeStatus::UNCHANGED; 9499 9500 for (auto *AAEdges : AAEdgesList) { 9501 if (AAEdges->hasUnknownCallee()) { 9502 if (!CanReachUnknownCallee) { 9503 LLVM_DEBUG(dbgs() 9504 << "[QueryResolver] Edges include unknown callee!\n"); 9505 Change = ChangeStatus::CHANGED; 9506 } 9507 CanReachUnknownCallee = true; 9508 return Change; 9509 } 9510 } 9511 9512 for (const Function *Fn : make_early_inc_range(Unreachable)) { 9513 if (checkIfReachable(A, AA, AAEdgesList, *Fn)) { 9514 Change = ChangeStatus::CHANGED; 9515 markReachable(*Fn); 9516 } 9517 } 9518 return Change; 9519 } 9520 9521 bool isReachable(Attributor &A, AAFunctionReachability &AA, 9522 ArrayRef<const AACallEdges *> AAEdgesList, 9523 const Function &Fn) { 9524 Optional<bool> Cached = isCachedReachable(Fn); 9525 if (Cached) 9526 return Cached.value(); 9527 9528 // The query was not cached, thus it is new. We need to request an update 9529 // explicitly to make sure the information is properly run to a 9530 // fixpoint. 9531 A.registerForUpdate(AA); 9532 9533 // We need to assume that this function can't reach Fn to prevent 9534 // an infinite loop if this function is recursive.
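// Seeding Fn as unreachable acts like an induction hypothesis: a recursive
// query for Fn is answered from the cache with false, and the entry is
// promoted to Reachable below once the check succeeds.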
9535 Unreachable.insert(&Fn); 9536 9537 bool Result = checkIfReachable(A, AA, AAEdgesList, Fn); 9538 if (Result) 9539 markReachable(Fn); 9540 return Result; 9541 } 9542 9543 bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA, 9544 ArrayRef<const AACallEdges *> AAEdgesList, 9545 const Function &Fn) const { 9546 9547 // Handle the most trivial case first. 9548 for (auto *AAEdges : AAEdgesList) { 9549 const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges(); 9550 9551 if (Edges.count(const_cast<Function *>(&Fn))) 9552 return true; 9553 } 9554 9555 SmallVector<const AAFunctionReachability *, 8> Deps; 9556 for (auto &AAEdges : AAEdgesList) { 9557 const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges(); 9558 9559 for (Function *Edge : Edges) { 9560 // Functions that do not call back into the module can be ignored. 9561 if (Edge->hasFnAttribute(Attribute::NoCallback)) 9562 continue; 9563 9564 // We don't need a dependency if the result is reachable. 9565 const AAFunctionReachability &EdgeReachability = 9566 A.getAAFor<AAFunctionReachability>( 9567 AA, IRPosition::function(*Edge), DepClassTy::NONE); 9568 Deps.push_back(&EdgeReachability); 9569 9570 if (EdgeReachability.canReach(A, Fn)) 9571 return true; 9572 } 9573 } 9574 9575 // The result is false for now, set dependencies and leave. 9576 for (auto *Dep : Deps) 9577 A.recordDependence(*Dep, AA, DepClassTy::REQUIRED); 9578 9579 return false; 9580 } 9581 }; 9582 9583 /// Get call edges that can be reached by this instruction. 9584 bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability, 9585 const Instruction &Inst, 9586 SmallVector<const AACallEdges *> &Result) const { 9587 // Determine call like instructions that we can reach from the inst. 9588 auto CheckCallBase = [&](Instruction &CBInst) { 9589 if (!Reachability.isAssumedReachable(A, Inst, CBInst)) 9590 return true; 9591 9592 auto &CB = cast<CallBase>(CBInst); 9593 const AACallEdges &AAEdges = A.getAAFor<AACallEdges>( 9594 *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED); 9595 9596 Result.push_back(&AAEdges); 9597 return true; 9598 }; 9599 9600 bool UsedAssumedInformation = false; 9601 return A.checkForAllCallLikeInstructions(CheckCallBase, *this, 9602 UsedAssumedInformation, 9603 /* CheckBBLivenessOnly */ true); 9604 } 9605 9606 public: 9607 AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A) 9608 : AAFunctionReachability(IRP, A) {} 9609 9610 bool canReach(Attributor &A, const Function &Fn) const override { 9611 if (!isValidState()) 9612 return true; 9613 9614 const AACallEdges &AAEdges = 9615 A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED); 9616 9617 // Attributor returns attributes as const, so this function has to be 9618 // const for users of this attribute to use it without having to do 9619 // a const_cast. 9620 // This is a hack for us to be able to cache queries. 
9621     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9622     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9623                                                           {&AAEdges}, Fn);
9624
9625     return Result;
9626   }
9627
9628   /// Can \p CB reach \p Fn.
9629   bool canReach(Attributor &A, CallBase &CB,
9630                 const Function &Fn) const override {
9631     if (!isValidState())
9632       return true;
9633
9634     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9635         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9636
9637     // Attributor returns attributes as const, so this function has to be
9638     // const for users of this attribute to use it without having to do
9639     // a const_cast.
9640     // This is a hack for us to be able to cache queries.
9641     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9642     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9643
9644     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9645
9646     return Result;
9647   }
9648
9649   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9650                            const Function &Fn,
9651                            bool UseBackwards) const override {
9652     if (!isValidState())
9653       return true;
9654
9655     if (UseBackwards)
9656       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9657
9658     const auto &Reachability = A.getAAFor<AAReachability>(
9659         *this, IRPosition::function(*getAssociatedFunction()),
9660         DepClassTy::REQUIRED);
9661
9662     SmallVector<const AACallEdges *> CallEdges;
9663     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9664     // Attributor returns attributes as const, so this function has to be
9665     // const for users of this attribute to use it without having to do
9666     // a const_cast.
9667     // This is a hack for us to be able to cache queries.
9668     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9669     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9670     if (!AllKnown) {
9671       LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges known, "
9672                            "may reach unknown callee!\n");
9673       InstQSet.CanReachUnknownCallee = true;
9674     }
9675
9676     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9677   }
9678
9679   /// See AbstractAttribute::updateImpl(...).
9680   ChangeStatus updateImpl(Attributor &A) override {
9681     const AACallEdges &AAEdges =
9682         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9683     ChangeStatus Change = ChangeStatus::UNCHANGED;
9684
9685     Change |= WholeFunction.update(A, *this, {&AAEdges});
9686
9687     for (auto &CBPair : CBQueries) {
9688       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9689           *this, IRPosition::callsite_function(*CBPair.first),
9690           DepClassTy::REQUIRED);
9691
9692       Change |= CBPair.second.update(A, *this, {&AAEdges});
9693     }
9694
9695     // Update the Instruction queries.
9696     if (!InstQueries.empty()) {
9697       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
9698           *this, IRPosition::function(*getAssociatedFunction()),
9699           DepClassTy::REQUIRED);
9700
9701       // Check for local call bases first.
9702       for (auto &InstPair : InstQueries) {
9703         SmallVector<const AACallEdges *> CallEdges;
9704         bool AllKnown =
9705             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
9706         // Update will return a change if this affects any queries.
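        // As above, failing to enumerate every call edge (e.g., an indirect
        // call with an unknown target) forces the conservative "can reach any
        // function" answer via CanReachUnknownCallee.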
9707         if (!AllKnown) {
9708           LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges "
9709                                "known, may reach unknown callee!\n");
9710           InstPair.second.CanReachUnknownCallee = true;
9711         }
9712         Change |= InstPair.second.update(A, *this, CallEdges);
9713       }
9714     }
9715
9716     return Change;
9717   }
9718
9719   const std::string getAsStr() const override {
9720     size_t QueryCount =
9721         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9722
9723     return "FunctionReachability [" +
9724            (canReachUnknownCallee()
9725                 ? "unknown"
9726                 : (std::to_string(WholeFunction.Reachable.size()) + "," +
9727                    std::to_string(QueryCount))) +
9728            "]";
9729   }
9730
9731   void trackStatistics() const override {}
9732
9733 private:
9734   bool canReachUnknownCallee() const override {
9735     return WholeFunction.CanReachUnknownCallee;
9736   }
9737
9738   /// Used to answer if the whole function can reach a specific function.
9739   QueryResolver WholeFunction;
9740
9741   /// Used to answer if a call base inside this function can reach a specific
9742   /// function.
9743   MapVector<const CallBase *, QueryResolver> CBQueries;
9744
9745   /// This is for instruction queries that scan "forward".
9746   MapVector<const Instruction *, QueryResolver> InstQueries;
9747 };
9748 } // namespace
9749
9750 template <typename AAType>
9751 static Optional<Constant *>
9752 askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
9753                       const IRPosition &IRP, Type &Ty) {
9754   if (!Ty.isIntegerTy())
9755     return nullptr;
9756
9757   // This will also pass the call base context.
9758   const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
9759
9760   Optional<Constant *> COpt = AA.getAssumedConstant(A);
9761
9762   if (!COpt.has_value()) {
9763     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9764     return llvm::None;
9765   }
9766   if (auto *C = COpt.getValue()) {
9767     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9768     return C;
9769   }
9770   return nullptr;
9771 }
9772
9773 Value *AAPotentialValues::getSingleValue(
9774     Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
9775     SmallVectorImpl<AA::ValueAndContext> &Values) {
9776   Type &Ty = *IRP.getAssociatedType();
9777   Optional<Value *> V;
9778   for (auto &It : Values) {
9779     V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
9780     if (V.has_value() && !V.getValue())
9781       break;
9782   }
9783   if (!V.has_value())
9784     return UndefValue::get(&Ty);
9785   return V.getValue();
9786 }
9787
9788 namespace {
9789 struct AAPotentialValuesImpl : AAPotentialValues {
9790   using StateType = PotentialLLVMValuesState;
9791
9792   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
9793       : AAPotentialValues(IRP, A) {}
9794
9795   /// See AbstractAttribute::initialize(..).
9796   void initialize(Attributor &A) override {
9797     if (A.hasSimplificationCallback(getIRPosition())) {
9798       indicatePessimisticFixpoint();
9799       return;
9800     }
9801     Value *Stripped = getAssociatedValue().stripPointerCasts();
9802     if (isa<Constant>(Stripped)) {
9803       addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
9804                getAnchorScope());
9805       indicateOptimisticFixpoint();
9806       return;
9807     }
9808     AAPotentialValues::initialize(A);
9809   }
9810
9811   /// See AbstractAttribute::getAsStr().
9812 const std::string getAsStr() const override { 9813 std::string Str; 9814 llvm::raw_string_ostream OS(Str); 9815 OS << getState(); 9816 return OS.str(); 9817 } 9818 9819 template <typename AAType> 9820 static Optional<Value *> askOtherAA(Attributor &A, 9821 const AbstractAttribute &AA, 9822 const IRPosition &IRP, Type &Ty) { 9823 if (isa<Constant>(IRP.getAssociatedValue())) 9824 return &IRP.getAssociatedValue(); 9825 Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty); 9826 if (!C) 9827 return llvm::None; 9828 if (C.getValue()) 9829 if (auto *CC = AA::getWithType(**C, Ty)) 9830 return CC; 9831 return nullptr; 9832 } 9833 9834 void addValue(Attributor &A, StateType &State, Value &V, 9835 const Instruction *CtxI, AA::ValueScope S, 9836 Function *AnchorScope) const { 9837 9838 IRPosition ValIRP = IRPosition::value(V); 9839 if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) { 9840 for (auto &U : CB->args()) { 9841 if (U.get() != &V) 9842 continue; 9843 ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)); 9844 break; 9845 } 9846 } 9847 9848 Value *VPtr = &V; 9849 if (ValIRP.getAssociatedType()->isIntegerTy()) { 9850 Type &Ty = *getAssociatedType(); 9851 Optional<Value *> SimpleV = 9852 askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty); 9853 if (SimpleV.has_value() && !SimpleV.getValue()) { 9854 auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>( 9855 *this, ValIRP, DepClassTy::OPTIONAL); 9856 if (PotentialConstantsAA.isValidState()) { 9857 for (auto &It : PotentialConstantsAA.getAssumedSet()) { 9858 State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S}); 9859 } 9860 assert(!PotentialConstantsAA.undefIsContained() && 9861 "Undef should be an explicit value!"); 9862 return; 9863 } 9864 } 9865 if (!SimpleV.has_value()) 9866 return; 9867 9868 if (SimpleV.getValue()) 9869 VPtr = SimpleV.getValue(); 9870 } 9871 9872 if (isa<ConstantInt>(VPtr)) 9873 CtxI = nullptr; 9874 if (!AA::isValidInScope(*VPtr, AnchorScope)) 9875 S = AA::ValueScope(S | AA::Interprocedural); 9876 9877 State.unionAssumed({{*VPtr, CtxI}, S}); 9878 } 9879 9880 /// Helper struct to tie a value+context pair together with the scope for 9881 /// which this is the simplified version. 
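  /// For example, a value might simplify to a constant inside its own
  /// function (AA::Intraprocedural) while callers that combine multiple call
  /// sites have to keep the unsimplified value (AA::Interprocedural).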
9882 struct ItemInfo { 9883 AA::ValueAndContext I; 9884 AA::ValueScope S; 9885 }; 9886 9887 bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) { 9888 SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap; 9889 for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) { 9890 if (!(CS & S)) 9891 continue; 9892 9893 bool UsedAssumedInformation = false; 9894 SmallVector<AA::ValueAndContext> Values; 9895 if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS, 9896 UsedAssumedInformation)) 9897 return false; 9898 9899 for (auto &It : Values) 9900 ValueScopeMap[It] += CS; 9901 } 9902 for (auto &It : ValueScopeMap) 9903 addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(), 9904 AA::ValueScope(It.second), getAnchorScope()); 9905 9906 return true; 9907 } 9908 9909 void giveUpOnIntraprocedural(Attributor &A) { 9910 auto NewS = StateType::getBestState(getState()); 9911 for (auto &It : getAssumedSet()) { 9912 if (It.second == AA::Intraprocedural) 9913 continue; 9914 addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(), 9915 AA::Interprocedural, getAnchorScope()); 9916 } 9917 assert(!undefIsContained() && "Undef should be an explicit value!"); 9918 addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural, 9919 getAnchorScope()); 9920 getState() = NewS; 9921 } 9922 9923 /// See AbstractState::indicatePessimisticFixpoint(...). 9924 ChangeStatus indicatePessimisticFixpoint() override { 9925 getState() = StateType::getBestState(getState()); 9926 getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope}); 9927 AAPotentialValues::indicateOptimisticFixpoint(); 9928 return ChangeStatus::CHANGED; 9929 } 9930 9931 /// See AbstractAttribute::updateImpl(...). 9932 ChangeStatus updateImpl(Attributor &A) override { 9933 return indicatePessimisticFixpoint(); 9934 } 9935 9936 /// See AbstractAttribute::manifest(...). 9937 ChangeStatus manifest(Attributor &A) override { 9938 SmallVector<AA::ValueAndContext> Values; 9939 for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) { 9940 Values.clear(); 9941 if (!getAssumedSimplifiedValues(A, Values, S)) 9942 continue; 9943 Value &OldV = getAssociatedValue(); 9944 if (isa<UndefValue>(OldV)) 9945 continue; 9946 Value *NewV = getSingleValue(A, *this, getIRPosition(), Values); 9947 if (!NewV || NewV == &OldV) 9948 continue; 9949 if (getCtxI() && 9950 !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache())) 9951 continue; 9952 if (A.changeAfterManifest(getIRPosition(), *NewV)) 9953 return ChangeStatus::CHANGED; 9954 } 9955 return ChangeStatus::UNCHANGED; 9956 } 9957 9958 bool getAssumedSimplifiedValues(Attributor &A, 9959 SmallVectorImpl<AA::ValueAndContext> &Values, 9960 AA::ValueScope S) const override { 9961 if (!isValidState()) 9962 return false; 9963 for (auto &It : getAssumedSet()) 9964 if (It.second & S) 9965 Values.push_back(It.first); 9966 assert(!undefIsContained() && "Undef should be an explicit value!"); 9967 return true; 9968 } 9969 }; 9970 9971 struct AAPotentialValuesFloating : AAPotentialValuesImpl { 9972 AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A) 9973 : AAPotentialValuesImpl(IRP, A) {} 9974 9975 /// See AbstractAttribute::updateImpl(...). 9976 ChangeStatus updateImpl(Attributor &A) override { 9977 auto AssumedBefore = getAssumed(); 9978 9979 genericValueTraversal(A); 9980 9981 return (AssumedBefore == getAssumed()) ? 
ChangeStatus::UNCHANGED 9982 : ChangeStatus::CHANGED; 9983 } 9984 9985 /// Helper struct to remember which AAIsDead instances we actually used. 9986 struct LivenessInfo { 9987 const AAIsDead *LivenessAA = nullptr; 9988 bool AnyDead = false; 9989 }; 9990 9991 /// Check if \p Cmp is a comparison we can simplify. 9992 /// 9993 /// We handle multiple cases, one in which at least one operand is an 9994 /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other 9995 /// operand. Return true if successful, in that case Worklist will be updated. 9996 bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II, 9997 SmallVectorImpl<ItemInfo> &Worklist) { 9998 Value *LHS = Cmp.getOperand(0); 9999 Value *RHS = Cmp.getOperand(1); 10000 10001 // Simplify the operands first. 10002 bool UsedAssumedInformation = false; 10003 const auto &SimplifiedLHS = A.getAssumedSimplified( 10004 IRPosition::value(*LHS, getCallBaseContext()), *this, 10005 UsedAssumedInformation, AA::Intraprocedural); 10006 if (!SimplifiedLHS.has_value()) 10007 return true; 10008 if (!SimplifiedLHS.getValue()) 10009 return false; 10010 LHS = *SimplifiedLHS; 10011 10012 const auto &SimplifiedRHS = A.getAssumedSimplified( 10013 IRPosition::value(*RHS, getCallBaseContext()), *this, 10014 UsedAssumedInformation, AA::Intraprocedural); 10015 if (!SimplifiedRHS.has_value()) 10016 return true; 10017 if (!SimplifiedRHS.getValue()) 10018 return false; 10019 RHS = *SimplifiedRHS; 10020 10021 LLVMContext &Ctx = Cmp.getContext(); 10022 // Handle the trivial case first in which we don't even need to think about 10023 // null or non-null. 10024 if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) { 10025 Constant *NewV = 10026 ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual()); 10027 addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, 10028 getAnchorScope()); 10029 return true; 10030 } 10031 10032 // From now on we only handle equalities (==, !=). 10033 ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp); 10034 if (!ICmp || !ICmp->isEquality()) 10035 return false; 10036 10037 bool LHSIsNull = isa<ConstantPointerNull>(LHS); 10038 bool RHSIsNull = isa<ConstantPointerNull>(RHS); 10039 if (!LHSIsNull && !RHSIsNull) 10040 return false; 10041 10042 // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the 10043 // non-nullptr operand and if we assume it's non-null we can conclude the 10044 // result of the comparison. 10045 assert((LHSIsNull || RHSIsNull) && 10046 "Expected nullptr versus non-nullptr comparison at this point"); 10047 10048 // The index is the operand that we assume is not null. 10049 unsigned PtrIdx = LHSIsNull; 10050 auto &PtrNonNullAA = A.getAAFor<AANonNull>( 10051 *this, IRPosition::value(*ICmp->getOperand(PtrIdx)), 10052 DepClassTy::REQUIRED); 10053 if (!PtrNonNullAA.isAssumedNonNull()) 10054 return false; 10055 10056 // The new value depends on the predicate, true for != and false for ==. 
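    // Illustrative IR (assuming %p was proven non-null by AANonNull):
    //   %c = icmp eq ptr %p, null   ; simplifies to i1 false
    //   %d = icmp ne ptr %p, null   ; simplifies to i1 true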
10057     Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
10058                                       ICmp->getPredicate() == CmpInst::ICMP_NE);
10059     addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope());
10060     return true;
10061   }
10062
10063   bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
10064                         SmallVectorImpl<ItemInfo> &Worklist) {
10065     const Instruction *CtxI = II.I.getCtxI();
10066     bool UsedAssumedInformation = false;
10067
10068     Optional<Constant *> C =
10069         A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
10070     bool NoValueYet = !C.has_value();
10071     if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
10072       return true;
10073     if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
10074       if (CI->isZero())
10075         Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10076       else
10077         Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10078     } else {
10079       // We could not simplify the condition, assume both values.
10080       Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10081       Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10082     }
10083     return true;
10084   }
10085
10086   bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
10087                       SmallVectorImpl<ItemInfo> &Worklist) {
10088     SmallSetVector<Value *, 4> PotentialCopies;
10089     SmallSetVector<Instruction *, 4> PotentialValueOrigins;
10090     bool UsedAssumedInformation = false;
10091     if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
10092                                         PotentialValueOrigins, *this,
10093                                         UsedAssumedInformation,
10094                                         /* OnlyExact */ true)) {
10095       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
10096                            "loaded values for load instruction "
10097                         << LI << "\n");
10098       return false;
10099     }
10100
10101     // Do not simplify loads that are only used in llvm.assume if we cannot also
10102     // remove all stores that may feed into the load. The reason is that the
10103     // assume is probably worth something as long as the stores are around.
10104     InformationCache &InfoCache = A.getInfoCache();
10105     if (InfoCache.isOnlyUsedByAssume(LI)) {
10106       if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
10107             if (!I)
10108               return true;
10109             if (auto *SI = dyn_cast<StoreInst>(I))
10110               return A.isAssumedDead(SI->getOperandUse(0), this,
10111                                      /* LivenessAA */ nullptr,
10112                                      UsedAssumedInformation,
10113                                      /* CheckBBLivenessOnly */ false);
10114             return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
10115                                    UsedAssumedInformation,
10116                                    /* CheckBBLivenessOnly */ false);
10117           })) {
10118         LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is only used by assumes "
10119                              "and we cannot delete all the stores: "
10120                           << LI << "\n");
10121         return false;
10122       }
10123     }
10124
10125     // Values have to be dynamically unique or we lose the fact that a
10126     // single llvm::Value might represent two runtime values (e.g.,
10127     // stack locations in different recursive calls).
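    // Illustrative case (hypothetical IR): in
    //   define i32 @rec(i32 %n) { %a = alloca i32 ... call i32 @rec(...) ... }
    // the single value %a names a different stack slot in every activation,
    // so loads from %a must not be folded across recursion levels.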
10128     const Instruction *CtxI = II.I.getCtxI();
10129     bool ScopeIsLocal = (II.S & AA::Intraprocedural);
10130     bool AllLocal = ScopeIsLocal;
10131     bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
10132       AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
10133       return AA::isDynamicallyUnique(A, *this, *PC);
10134     });
10135     if (!DynamicallyUnique) {
10136       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
10137                            "values are dynamically unique: "
10138                         << LI << "\n");
10139       return false;
10140     }
10141
10142     for (auto *PotentialCopy : PotentialCopies) {
10143       if (AllLocal) {
10144         Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
10145       } else {
10146         Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
10147       }
10148     }
10149     if (!AllLocal && ScopeIsLocal)
10150       addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
10151     return true;
10152   }
10153
10154   bool handlePHINode(
10155       Attributor &A, PHINode &PHI, ItemInfo II,
10156       SmallVectorImpl<ItemInfo> &Worklist,
10157       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10158     auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
10159       LivenessInfo &LI = LivenessAAs[&F];
10160       if (!LI.LivenessAA)
10161         LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
10162                                               DepClassTy::NONE);
10163       return LI;
10164     };
10165
10166     LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
10167     for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
10168       BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
10169       if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
10170         LI.AnyDead = true;
10171         continue;
10172       }
10173       Worklist.push_back(
10174           {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S});
10175     }
10176     return true;
10177   }
10178
10179   /// Use the generic, non-optimistic InstSimplify functionality if we managed to
10180   /// simplify any operand of the instruction \p I. Return true if successful,
10181   /// in that case Worklist will be updated.
10182   bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
10183                          SmallVectorImpl<ItemInfo> &Worklist) {
10184     bool SomeSimplified = false;
10185     bool UsedAssumedInformation = false;
10186
10187     SmallVector<Value *, 8> NewOps(I.getNumOperands());
10188     int Idx = 0;
10189     for (Value *Op : I.operands()) {
10190       const auto &SimplifiedOp = A.getAssumedSimplified(
10191           IRPosition::value(*Op, getCallBaseContext()), *this,
10192           UsedAssumedInformation, AA::Intraprocedural);
10193       // If we are not sure about any operand we are not sure about the entire
10194       // instruction, we'll wait.
10195       if (!SimplifiedOp.has_value())
10196         return true;
10197
10198       if (SimplifiedOp.getValue())
10199         NewOps[Idx] = SimplifiedOp.getValue();
10200       else
10201         NewOps[Idx] = Op;
10202
10203       SomeSimplified |= (NewOps[Idx] != Op);
10204       ++Idx;
10205     }
10206
10207     // We won't bother with the InstSimplify interface if we didn't simplify any
10208     // operand ourselves.
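    // E.g., if the operand %x of "%r = add i32 %x, 1" was assumed to simplify
    // to the constant 41, the InstSimplify call below can fold %r to 42.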
10209 if (!SomeSimplified) 10210 return false; 10211 10212 InformationCache &InfoCache = A.getInfoCache(); 10213 Function *F = I.getFunction(); 10214 const auto *DT = 10215 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F); 10216 const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F); 10217 auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F); 10218 OptimizationRemarkEmitter *ORE = nullptr; 10219 10220 const DataLayout &DL = I.getModule()->getDataLayout(); 10221 SimplifyQuery Q(DL, TLI, DT, AC, &I); 10222 Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE); 10223 if (!NewV || NewV == &I) 10224 return false; 10225 10226 LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to " 10227 << *NewV << "\n"); 10228 Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S}); 10229 return true; 10230 } 10231 10232 bool simplifyInstruction( 10233 Attributor &A, Instruction &I, ItemInfo II, 10234 SmallVectorImpl<ItemInfo> &Worklist, 10235 SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) { 10236 if (auto *CI = dyn_cast<CmpInst>(&I)) 10237 if (handleCmp(A, *CI, II, Worklist)) 10238 return true; 10239 10240 switch (I.getOpcode()) { 10241 case Instruction::Select: 10242 return handleSelectInst(A, cast<SelectInst>(I), II, Worklist); 10243 case Instruction::PHI: 10244 return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs); 10245 case Instruction::Load: 10246 return handleLoadInst(A, cast<LoadInst>(I), II, Worklist); 10247 default: 10248 return handleGenericInst(A, I, II, Worklist); 10249 }; 10250 return false; 10251 } 10252 10253 void genericValueTraversal(Attributor &A) { 10254 SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs; 10255 10256 Value *InitialV = &getAssociatedValue(); 10257 SmallSet<AA::ValueAndContext, 16> Visited; 10258 SmallVector<ItemInfo, 16> Worklist; 10259 Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope}); 10260 10261 int Iteration = 0; 10262 do { 10263 ItemInfo II = Worklist.pop_back_val(); 10264 Value *V = II.I.getValue(); 10265 assert(V); 10266 const Instruction *CtxI = II.I.getCtxI(); 10267 AA::ValueScope S = II.S; 10268 10269 // Check if we should process the current value. To prevent endless 10270 // recursion keep a record of the values we followed! 10271 if (!Visited.insert(II.I).second) 10272 continue; 10273 10274 // Make sure we limit the compile time for complex expressions. 10275 if (Iteration++ >= MaxPotentialValuesIterations) { 10276 LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: " 10277 << Iteration << "!\n"); 10278 addValue(A, getState(), *V, CtxI, S, getAnchorScope()); 10279 continue; 10280 } 10281 10282 // Explicitly look through calls with a "returned" attribute if we do 10283 // not have a pointer as stripPointerCasts only works on them. 
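      // Illustrative case (hypothetical callee): for
      //   %r = call i32 @passthrough(i32 %x)  ; with %x marked "returned"
      // the traversal below continues with %x instead of stopping at %r.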
10284       Value *NewV = nullptr;
10285       if (V->getType()->isPointerTy()) {
10286         NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
10287       } else {
10288         auto *CB = dyn_cast<CallBase>(V);
10289         if (CB && CB->getCalledFunction()) {
10290           for (Argument &Arg : CB->getCalledFunction()->args())
10291             if (Arg.hasReturnedAttr()) {
10292               NewV = CB->getArgOperand(Arg.getArgNo());
10293               break;
10294             }
10295         }
10296       }
10297       if (NewV && NewV != V) {
10298         Worklist.push_back({{*NewV, CtxI}, S});
10299         continue;
10300       }
10301
10302       if (auto *I = dyn_cast<Instruction>(V)) {
10303         if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
10304           continue;
10305       }
10306
10307       if (V != InitialV || isa<Argument>(V))
10308         if (recurseForValue(A, IRPosition::value(*V), II.S))
10309           continue;
10310
10311       // If we haven't stripped anything we give up.
10312       if (V == InitialV && CtxI == getCtxI()) {
10313         indicatePessimisticFixpoint();
10314         return;
10315       }
10316
10317       addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10318     } while (!Worklist.empty());
10319
10320     // If we actually used liveness information we have to record a
10321     // dependence.
10322     for (auto &It : LivenessAAs)
10323       if (It.second.AnyDead)
10324         A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
10325   }
10326
10327   /// See AbstractAttribute::trackStatistics()
10328   void trackStatistics() const override {
10329     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10330   }
10331 };
10332
10333 struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
10334   using Base = AAPotentialValuesImpl;
10335   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
10336       : Base(IRP, A) {}
10337
10338   /// See AbstractAttribute::initialize(..).
10339   void initialize(Attributor &A) override {
10340     auto &Arg = cast<Argument>(getAssociatedValue());
10341     if (Arg.hasPointeeInMemoryValueAttr())
10342       indicatePessimisticFixpoint();
10343   }
10344
10345   /// See AbstractAttribute::updateImpl(...).
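  /// The potential values of an argument are the union of the simplified
  /// values of the matching operand at every known call site; a single call
  /// site we cannot see forces a pessimistic fixpoint below.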
10346 ChangeStatus updateImpl(Attributor &A) override { 10347 auto AssumedBefore = getAssumed(); 10348 10349 unsigned CSArgNo = getCallSiteArgNo(); 10350 10351 bool UsedAssumedInformation = false; 10352 SmallVector<AA::ValueAndContext> Values; 10353 auto CallSitePred = [&](AbstractCallSite ACS) { 10354 const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo); 10355 if (CSArgIRP.getPositionKind() == IRP_INVALID) 10356 return false; 10357 10358 if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values, 10359 AA::Interprocedural, 10360 UsedAssumedInformation)) 10361 return false; 10362 10363 return isValidState(); 10364 }; 10365 10366 if (!A.checkForAllCallSites(CallSitePred, *this, 10367 /* RequireAllCallSites */ true, 10368 UsedAssumedInformation)) 10369 return indicatePessimisticFixpoint(); 10370 10371 Function *Fn = getAssociatedFunction(); 10372 bool AnyNonLocal = false; 10373 for (auto &It : Values) { 10374 if (isa<Constant>(It.getValue())) { 10375 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope, 10376 getAnchorScope()); 10377 continue; 10378 } 10379 if (!AA::isDynamicallyUnique(A, *this, *It.getValue())) 10380 return indicatePessimisticFixpoint(); 10381 10382 if (auto *Arg = dyn_cast<Argument>(It.getValue())) 10383 if (Arg->getParent() == Fn) { 10384 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope, 10385 getAnchorScope()); 10386 continue; 10387 } 10388 addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural, 10389 getAnchorScope()); 10390 AnyNonLocal = true; 10391 } 10392 if (undefIsContained()) 10393 unionAssumedWithUndef(); 10394 if (AnyNonLocal) 10395 giveUpOnIntraprocedural(A); 10396 10397 return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED 10398 : ChangeStatus::CHANGED; 10399 } 10400 10401 /// See AbstractAttribute::trackStatistics() 10402 void trackStatistics() const override { 10403 STATS_DECLTRACK_ARG_ATTR(potential_values) 10404 } 10405 }; 10406 10407 struct AAPotentialValuesReturned 10408 : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> { 10409 using Base = 10410 AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>; 10411 AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A) 10412 : Base(IRP, A) {} 10413 10414 /// See AbstractAttribute::initialize(..). 10415 void initialize(Attributor &A) override { 10416 if (A.hasSimplificationCallback(getIRPosition())) 10417 indicatePessimisticFixpoint(); 10418 else 10419 AAPotentialValues::initialize(A); 10420 } 10421 10422 ChangeStatus manifest(Attributor &A) override { 10423 // We queried AAValueSimplify for the returned values so they will be 10424 // replaced if a simplified form was found. Nothing to do here. 10425 return ChangeStatus::UNCHANGED; 10426 } 10427 10428 ChangeStatus indicatePessimisticFixpoint() override { 10429 return AAPotentialValues::indicatePessimisticFixpoint(); 10430 } 10431 10432 /// See AbstractAttribute::trackStatistics() 10433 void trackStatistics() const override { 10434 STATS_DECLTRACK_FNRET_ATTR(potential_values) 10435 } 10436 }; 10437 10438 struct AAPotentialValuesFunction : AAPotentialValuesImpl { 10439 AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A) 10440 : AAPotentialValuesImpl(IRP, A) {} 10441 10442 /// See AbstractAttribute::updateImpl(...). 
10443 ChangeStatus updateImpl(Attributor &A) override { 10444 llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will " 10445 "not be called"); 10446 } 10447 10448 /// See AbstractAttribute::trackStatistics() 10449 void trackStatistics() const override { 10450 STATS_DECLTRACK_FN_ATTR(potential_values) 10451 } 10452 }; 10453 10454 struct AAPotentialValuesCallSite : AAPotentialValuesFunction { 10455 AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A) 10456 : AAPotentialValuesFunction(IRP, A) {} 10457 10458 /// See AbstractAttribute::trackStatistics() 10459 void trackStatistics() const override { 10460 STATS_DECLTRACK_CS_ATTR(potential_values) 10461 } 10462 }; 10463 10464 struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl { 10465 AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A) 10466 : AAPotentialValuesImpl(IRP, A) {} 10467 10468 /// See AbstractAttribute::updateImpl(...). 10469 ChangeStatus updateImpl(Attributor &A) override { 10470 auto AssumedBefore = getAssumed(); 10471 10472 Function *Callee = getAssociatedFunction(); 10473 if (!Callee) 10474 return indicatePessimisticFixpoint(); 10475 10476 bool UsedAssumedInformation = false; 10477 auto *CB = cast<CallBase>(getCtxI()); 10478 if (CB->isMustTailCall() && 10479 !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr, 10480 UsedAssumedInformation)) 10481 return indicatePessimisticFixpoint(); 10482 10483 SmallVector<AA::ValueAndContext> Values; 10484 if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this, 10485 Values, AA::Intraprocedural, 10486 UsedAssumedInformation)) 10487 return indicatePessimisticFixpoint(); 10488 10489 Function *Caller = CB->getCaller(); 10490 10491 bool AnyNonLocal = false; 10492 for (auto &It : Values) { 10493 Value *V = It.getValue(); 10494 Optional<Value *> CallerV = A.translateArgumentToCallSiteContent( 10495 V, *CB, *this, UsedAssumedInformation); 10496 if (!CallerV.has_value()) { 10497 // Nothing to do as long as no value was determined. 10498 continue; 10499 } 10500 V = CallerV.getValue() ? CallerV.getValue() : V; 10501 if (AA::isDynamicallyUnique(A, *this, *V) && 10502 AA::isValidInScope(*V, Caller)) { 10503 if (CallerV.getValue()) { 10504 SmallVector<AA::ValueAndContext> ArgValues; 10505 IRPosition IRP = IRPosition::value(*V); 10506 if (auto *Arg = dyn_cast<Argument>(V)) 10507 if (Arg->getParent() == CB->getCalledFunction()) 10508 IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo()); 10509 if (recurseForValue(A, IRP, AA::AnyScope)) 10510 continue; 10511 } 10512 addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope()); 10513 } else { 10514 AnyNonLocal = true; 10515 break; 10516 } 10517 } 10518 if (AnyNonLocal) { 10519 Values.clear(); 10520 if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this, 10521 Values, AA::Interprocedural, 10522 UsedAssumedInformation)) 10523 return indicatePessimisticFixpoint(); 10524 AnyNonLocal = false; 10525 getState() = PotentialLLVMValuesState::getBestState(); 10526 for (auto &It : Values) { 10527 Value *V = It.getValue(); 10528 if (!AA::isDynamicallyUnique(A, *this, *V)) 10529 return indicatePessimisticFixpoint(); 10530 if (AA::isValidInScope(*V, Caller)) { 10531 addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope()); 10532 } else { 10533 AnyNonLocal = true; 10534 addValue(A, getState(), *V, CB, AA::Interprocedural, 10535 getAnchorScope()); 10536 } 10537 } 10538 if (AnyNonLocal) 10539 giveUpOnIntraprocedural(A); 10540 } 10541 return (AssumedBefore == getAssumed()) ? 
ChangeStatus::UNCHANGED
10542                                             : ChangeStatus::CHANGED;
10543   }
10544
10545   ChangeStatus indicatePessimisticFixpoint() override {
10546     return AAPotentialValues::indicatePessimisticFixpoint();
10547   }
10548
10549   /// See AbstractAttribute::trackStatistics()
10550   void trackStatistics() const override {
10551     STATS_DECLTRACK_CSRET_ATTR(potential_values)
10552   }
10553 };
10554
10555 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
10556   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
10557       : AAPotentialValuesFloating(IRP, A) {}
10558
10559   /// See AbstractAttribute::trackStatistics()
10560   void trackStatistics() const override {
10561     STATS_DECLTRACK_CSARG_ATTR(potential_values)
10562   }
10563 };
10564 } // namespace
10565
10566 /// ---------------------- Assumption Propagation ------------------------------
10567 namespace {
10568 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10569   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10570                        const DenseSet<StringRef> &Known)
10571       : AAAssumptionInfo(IRP, A, Known) {}
10572
10573   bool hasAssumption(const StringRef Assumption) const override {
10574     return isValidState() && setContains(Assumption);
10575   }
10576
10577   /// See AbstractAttribute::getAsStr()
10578   const std::string getAsStr() const override {
10579     const SetContents &Known = getKnown();
10580     const SetContents &Assumed = getAssumed();
10581
10582     const std::string KnownStr =
10583         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10584     const std::string AssumedStr =
10585         (Assumed.isUniversal())
10586             ? "Universal"
10587             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10588
10589     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10590   }
10591 };
10592
10593 /// Propagates assumption information from parent functions to all of their
10594 /// successors. An assumption can be propagated if the containing function
10595 /// dominates the called function.
10596 ///
10597 /// We start with a "known" set of assumptions already valid for the associated
10598 /// function and an "assumed" set that initially contains all possible
10599 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10600 /// contents as concrete values are known. The concrete values are seeded by the
10601 /// first nodes that are either entries into the call graph, or contain no
10602 /// assumptions. Each node is updated as the intersection of the assumed state
10603 /// with all of its predecessors.
10604 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10605   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10606       : AAAssumptionInfoImpl(IRP, A,
10607                              getAssumptions(*IRP.getAssociatedFunction())) {}
10608
10609   /// See AbstractAttribute::manifest(...).
10610   ChangeStatus manifest(Attributor &A) override {
10611     const auto &Assumptions = getKnown();
10612
10613     // Don't manifest a universal set if it somehow made it here.
10614     if (Assumptions.isUniversal())
10615       return ChangeStatus::UNCHANGED;
10616
10617     Function *AssociatedFunction = getAssociatedFunction();
10618
10619     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10620
10621     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10622   }
10623
10624   /// See AbstractAttribute::updateImpl(...).
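  /// E.g., a function whose only two callers carry the assumption sets
  /// {"a","b"} and {"b","c"} (strings purely illustrative) is narrowed to
  /// their intersection {"b"} by the update below.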
10625   ChangeStatus updateImpl(Attributor &A) override {
10626     bool Changed = false;
10627
10628     auto CallSitePred = [&](AbstractCallSite ACS) {
10629       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10630           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10631           DepClassTy::REQUIRED);
10632       // Get the set of assumptions shared by all of this function's callers.
10633       Changed |= getIntersection(AssumptionAA.getAssumed());
10634       return !getAssumed().empty() || !getKnown().empty();
10635     };
10636
10637     bool UsedAssumedInformation = false;
10638     // Get the intersection of all assumptions held by this node's predecessors.
10639     // If we don't know all the call sites then this is either an entry into the
10640     // call graph or an empty node. This node is known to only contain its own
10641     // assumptions and can be propagated to its successors.
10642     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10643                                 UsedAssumedInformation))
10644       return indicatePessimisticFixpoint();
10645
10646     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10647   }
10648
10649   void trackStatistics() const override {}
10650 };
10651
10652 /// Assumption Info defined for call sites.
10653 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10654
10655   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10656       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10657
10658   /// See AbstractAttribute::initialize(...).
10659   void initialize(Attributor &A) override {
10660     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10661     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10662   }
10663
10664   /// See AbstractAttribute::manifest(...).
10665   ChangeStatus manifest(Attributor &A) override {
10666     // Don't manifest a universal set if it somehow made it here.
10667     if (getKnown().isUniversal())
10668       return ChangeStatus::UNCHANGED;
10669
10670     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10671     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10672
10673     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10674   }
10675
10676   /// See AbstractAttribute::updateImpl(...).
10677   ChangeStatus updateImpl(Attributor &A) override {
10678     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10679     auto &AssumptionAA =
10680         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10681     bool Changed = getIntersection(AssumptionAA.getAssumed());
10682     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10683   }
10684
10685   /// See AbstractAttribute::trackStatistics()
10686   void trackStatistics() const override {}
10687
10688 private:
10689   /// Helper to initialize the known set as all the assumptions this call and
10690   /// the callee contain.
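  /// E.g., a call site annotated with the assumption set {"a"} whose callee
  /// carries {"b"} (strings purely illustrative) starts out knowing {"a","b"}.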
10691   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10692     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10693     auto Assumptions = getAssumptions(CB);
10694     if (Function *F = IRP.getAssociatedFunction())
10695       set_union(Assumptions, getAssumptions(*F));
10698     return Assumptions;
10699   }
10700 };
10701 } // namespace
10702
10703 AACallGraphNode *AACallEdgeIterator::operator*() const {
10704   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10705       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10706 }
10707
10708 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10709
10710 const char AAReturnedValues::ID = 0;
10711 const char AANoUnwind::ID = 0;
10712 const char AANoSync::ID = 0;
10713 const char AANoFree::ID = 0;
10714 const char AANonNull::ID = 0;
10715 const char AANoRecurse::ID = 0;
10716 const char AAWillReturn::ID = 0;
10717 const char AAUndefinedBehavior::ID = 0;
10718 const char AANoAlias::ID = 0;
10719 const char AAReachability::ID = 0;
10720 const char AANoReturn::ID = 0;
10721 const char AAIsDead::ID = 0;
10722 const char AADereferenceable::ID = 0;
10723 const char AAAlign::ID = 0;
10724 const char AAInstanceInfo::ID = 0;
10725 const char AANoCapture::ID = 0;
10726 const char AAValueSimplify::ID = 0;
10727 const char AAHeapToStack::ID = 0;
10728 const char AAPrivatizablePtr::ID = 0;
10729 const char AAMemoryBehavior::ID = 0;
10730 const char AAMemoryLocation::ID = 0;
10731 const char AAValueConstantRange::ID = 0;
10732 const char AAPotentialConstantValues::ID = 0;
10733 const char AAPotentialValues::ID = 0;
10734 const char AANoUndef::ID = 0;
10735 const char AACallEdges::ID = 0;
10736 const char AAFunctionReachability::ID = 0;
10737 const char AAPointerInfo::ID = 0;
10738 const char AAAssumptionInfo::ID = 0;
10739
10740 // Macro magic to create the static generator function for attributes that
10741 // follow the naming scheme.
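// For example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// below expands to AANoUnwind::createForPosition(...), which allocates an
// AANoUnwindFunction or AANoUnwindCallSite depending on the position kind
// and rejects every other kind via llvm_unreachable.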
10742 10743 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \ 10744 case IRPosition::PK: \ 10745 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!"); 10746 10747 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \ 10748 case IRPosition::PK: \ 10749 AA = new (A.Allocator) CLASS##SUFFIX(IRP, A); \ 10750 ++NumAAs; \ 10751 break; 10752 10753 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 10754 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 10755 CLASS *AA = nullptr; \ 10756 switch (IRP.getPositionKind()) { \ 10757 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 10758 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 10759 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 10760 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 10761 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 10762 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 10763 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 10764 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 10765 } \ 10766 return *AA; \ 10767 } 10768 10769 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 10770 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 10771 CLASS *AA = nullptr; \ 10772 switch (IRP.getPositionKind()) { \ 10773 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 10774 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \ 10775 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 10776 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 10777 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 10778 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 10779 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 10780 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 10781 } \ 10782 return *AA; \ 10783 } 10784 10785 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 10786 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 10787 CLASS *AA = nullptr; \ 10788 switch (IRP.getPositionKind()) { \ 10789 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 10790 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 10791 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 10792 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 10793 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 10794 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \ 10795 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 10796 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 10797 } \ 10798 return *AA; \ 10799 } 10800 10801 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 10802 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 10803 CLASS *AA = nullptr; \ 10804 switch (IRP.getPositionKind()) { \ 10805 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 10806 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \ 10807 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \ 10808 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 10809 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \ 10810 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \ 10811 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \ 10812 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 10813 } \ 10814 return *AA; \ 10815 } 10816 10817 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \ 10818 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \ 10819 
CLASS *AA = nullptr; \ 10820 switch (IRP.getPositionKind()) { \ 10821 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \ 10822 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \ 10823 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \ 10824 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \ 10825 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \ 10826 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \ 10827 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \ 10828 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \ 10829 } \ 10830 return *AA; \ 10831 } 10832 10833 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) 10834 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync) 10835 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse) 10836 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn) 10837 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn) 10838 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues) 10839 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation) 10840 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges) 10841 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo) 10842 10843 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull) 10844 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias) 10845 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr) 10846 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable) 10847 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign) 10848 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo) 10849 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture) 10850 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange) 10851 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues) 10852 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues) 10853 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef) 10854 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo) 10855 10856 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) 10857 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) 10858 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree) 10859 10860 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack) 10861 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability) 10862 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior) 10863 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability) 10864 10865 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior) 10866 10867 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION 10868 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION 10869 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION 10870 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION 10871 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION 10872 #undef SWITCH_PK_CREATE 10873 #undef SWITCH_PK_INV 10874