1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/SCCIterator.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetOperations.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/CaptureTracking.h"
30 #include "llvm/Analysis/InstructionSimplify.h"
31 #include "llvm/Analysis/LazyValueInfo.h"
32 #include "llvm/Analysis/MemoryBuiltins.h"
33 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
34 #include "llvm/Analysis/ScalarEvolution.h"
35 #include "llvm/Analysis/TargetTransformInfo.h"
36 #include "llvm/Analysis/ValueTracking.h"
37 #include "llvm/IR/Argument.h"
38 #include "llvm/IR/Assumptions.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/GlobalValue.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/NoFolder.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/IR/ValueHandle.h"
53 #include "llvm/Support/Alignment.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/GraphWriter.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include "llvm/Transforms/Utils/Local.h"
61 #include "llvm/Transforms/Utils/ValueMapper.h"
62 #include <cassert>
63 
64 using namespace llvm;
65 
66 #define DEBUG_TYPE "attributor"
67 
68 static cl::opt<bool> ManifestInternal(
69     "attributor-manifest-internal", cl::Hidden,
70     cl::desc("Manifest Attributor internal string attributes."),
71     cl::init(false));
72 
73 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
74                                        cl::Hidden);
75 
76 template <>
77 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
78 
79 template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;
80 
81 static cl::opt<unsigned, true> MaxPotentialValues(
82     "attributor-max-potential-values", cl::Hidden,
83     cl::desc("Maximum number of potential values to be "
84              "tracked for each position."),
85     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
86     cl::init(7));
87 
88 static cl::opt<int> MaxPotentialValuesIterations(
89     "attributor-max-potential-values-iterations", cl::Hidden,
90     cl::desc(
91         "Maximum number of iterations we keep dismantling potential values."),
92     cl::init(64));
93 
94 static cl::opt<unsigned> MaxInterferingAccesses(
95     "attributor-max-interfering-accesses", cl::Hidden,
96     cl::desc("Maximum number of interfering accesses to "
97              "check before assuming all might interfere."),
98     cl::init(6));
99 
100 STATISTIC(NumAAs, "Number of abstract attributes created");
101 
102 // Some helper macros to deal with statistics tracking.
103 //
104 // Usage:
105 // For simple IR attribute tracking overload trackStatistics in the abstract
106 // attribute and choose the right STATS_DECLTRACK_********* macro,
107 // e.g.,:
108 //  void trackStatistics() const override {
109 //    STATS_DECLTRACK_ARG_ATTR(returned)
110 //  }
111 // If there is a single "increment" side one can use the macro
112 // STATS_DECLTRACK with a custom message. If there are multiple increment
113 // sides, STATS_DECL and STATS_TRACK can also be used separately.
114 //
115 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
116   ("Number of " #TYPE " marked '" #NAME "'")
117 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
118 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
119 #define STATS_DECL(NAME, TYPE, MSG)                                            \
120   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
121 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
122 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
123   {                                                                            \
124     STATS_DECL(NAME, TYPE, MSG)                                                \
125     STATS_TRACK(NAME, TYPE)                                                    \
126   }
127 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
128   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
129 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
130   STATS_DECLTRACK(NAME, CSArguments,                                           \
131                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
132 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
133   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
134 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
135   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
136 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
137   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
138                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
139 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
140   STATS_DECLTRACK(NAME, CSReturn,                                              \
141                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
142 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
143   STATS_DECLTRACK(NAME, Floating,                                              \
144                   ("Number of floating values known to be '" #NAME "'"))
145 
146 // Specialization of the operator<< for abstract attributes subclasses. This
147 // disambiguates situations where multiple operators are applicable.
148 namespace llvm {
149 #define PIPE_OPERATOR(CLASS)                                                   \
150   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
151     return OS << static_cast<const AbstractAttribute &>(AA);                   \
152   }
153 
154 PIPE_OPERATOR(AAIsDead)
155 PIPE_OPERATOR(AANoUnwind)
156 PIPE_OPERATOR(AANoSync)
157 PIPE_OPERATOR(AANoRecurse)
158 PIPE_OPERATOR(AAWillReturn)
159 PIPE_OPERATOR(AANoReturn)
160 PIPE_OPERATOR(AAReturnedValues)
161 PIPE_OPERATOR(AANonNull)
162 PIPE_OPERATOR(AANoAlias)
163 PIPE_OPERATOR(AADereferenceable)
164 PIPE_OPERATOR(AAAlign)
165 PIPE_OPERATOR(AAInstanceInfo)
166 PIPE_OPERATOR(AANoCapture)
167 PIPE_OPERATOR(AAValueSimplify)
168 PIPE_OPERATOR(AANoFree)
169 PIPE_OPERATOR(AAHeapToStack)
170 PIPE_OPERATOR(AAReachability)
171 PIPE_OPERATOR(AAMemoryBehavior)
172 PIPE_OPERATOR(AAMemoryLocation)
173 PIPE_OPERATOR(AAValueConstantRange)
174 PIPE_OPERATOR(AAPrivatizablePtr)
175 PIPE_OPERATOR(AAUndefinedBehavior)
176 PIPE_OPERATOR(AAPotentialConstantValues)
177 PIPE_OPERATOR(AAPotentialValues)
178 PIPE_OPERATOR(AANoUndef)
179 PIPE_OPERATOR(AACallEdges)
180 PIPE_OPERATOR(AAFunctionReachability)
181 PIPE_OPERATOR(AAPointerInfo)
182 PIPE_OPERATOR(AAAssumptionInfo)
183 
184 #undef PIPE_OPERATOR
185 
186 template <>
187 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
188                                                      const DerefState &R) {
189   ChangeStatus CS0 =
190       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
191   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
192   return CS0 | CS1;
193 }
194 
195 } // namespace llvm
196 
197 /// Checks if a type could have padding bytes.
198 static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
199   // There is no size information, so be conservative.
200   if (!Ty->isSized())
201     return false;
202 
203   // If the alloc size is not equal to the storage size, then there are padding
204   // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
205   if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
206     return false;
207 
208   // FIXME: This isn't the right way to check for padding in vectors with
209   // non-byte-size elements.
210   if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
211     return isDenselyPacked(SeqTy->getElementType(), DL);
212 
213   // For array types, check for padding within members.
214   if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
215     return isDenselyPacked(SeqTy->getElementType(), DL);
216 
217   if (!isa<StructType>(Ty))
218     return true;
219 
220   // Check for padding within and between elements of a struct.
221   StructType *StructTy = cast<StructType>(Ty);
222   const StructLayout *Layout = DL.getStructLayout(StructTy);
223   uint64_t StartPos = 0;
224   for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
225     Type *ElTy = StructTy->getElementType(I);
226     if (!isDenselyPacked(ElTy, DL))
227       return false;
228     if (StartPos != Layout->getElementOffsetInBits(I))
229       return false;
230     StartPos += DL.getTypeAllocSizeInBits(ElTy);
231   }
232 
233   return true;
234 }
235 
236 /// Get pointer operand of memory accessing instruction. If \p I is
237 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
238 /// is set to false and the instruction is volatile, return nullptr.
239 static const Value *getPointerOperand(const Instruction *I,
240                                       bool AllowVolatile) {
241   if (!AllowVolatile && I->isVolatile())
242     return nullptr;
243 
244   if (auto *LI = dyn_cast<LoadInst>(I)) {
245     return LI->getPointerOperand();
246   }
247 
248   if (auto *SI = dyn_cast<StoreInst>(I)) {
249     return SI->getPointerOperand();
250   }
251 
252   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
253     return CXI->getPointerOperand();
254   }
255 
256   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
257     return RMWI->getPointerOperand();
258   }
259 
260   return nullptr;
261 }
262 
263 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
264 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
265 /// getelement pointer instructions that traverse the natural type of \p Ptr if
266 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
267 /// through a cast to i8*.
268 ///
269 /// TODO: This could probably live somewhere more prominantly if it doesn't
270 ///       already exist.
271 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
272                                int64_t Offset, IRBuilder<NoFolder> &IRB,
273                                const DataLayout &DL) {
274   assert(Offset >= 0 && "Negative offset not supported yet!");
275   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
276                     << "-bytes as " << *ResTy << "\n");
277 
278   if (Offset) {
279     Type *Ty = PtrElemTy;
280     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
281     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
282 
283     SmallVector<Value *, 4> ValIndices;
284     std::string GEPName = Ptr->getName().str();
285     for (const APInt &Index : IntIndices) {
286       ValIndices.push_back(IRB.getInt(Index));
287       GEPName += "." + std::to_string(Index.getZExtValue());
288     }
289 
290     // Create a GEP for the indices collected above.
291     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
292 
293     // If an offset is left we use byte-wise adjustment.
294     if (IntOffset != 0) {
295       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
296       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
297                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
298     }
299   }
300 
301   // Ensure the result has the requested type.
302   Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
303                                                 Ptr->getName() + ".cast");
304 
305   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
306   return Ptr;
307 }
308 
309 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
310                                      SmallSetVector<Value *, 8> &Objects,
311                                      const AbstractAttribute &QueryingAA,
312                                      const Instruction *CtxI,
313                                      bool &UsedAssumedInformation,
314                                      AA::ValueScope S,
315                                      SmallPtrSetImpl<Value *> *SeenObjects) {
316   SmallPtrSet<Value *, 8> LocalSeenObjects;
317   if (!SeenObjects)
318     SeenObjects = &LocalSeenObjects;
319 
320   SmallVector<AA::ValueAndContext> Values;
321   if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
322                                     S, UsedAssumedInformation)) {
323     Objects.insert(const_cast<Value *>(&Ptr));
324     return true;
325   }
326 
327   for (auto &VAC : Values) {
328     Value *UO = getUnderlyingObject(VAC.getValue());
329     if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
330       if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
331                                        VAC.getCtxI(), UsedAssumedInformation, S,
332                                        SeenObjects))
333         return false;
334       continue;
335     }
336     Objects.insert(VAC.getValue());
337   }
338   return true;
339 }
340 
341 static const Value *
342 stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
343                           const Value *Val, const DataLayout &DL, APInt &Offset,
344                           bool GetMinOffset, bool AllowNonInbounds,
345                           bool UseAssumed = false) {
346 
347   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
348     const IRPosition &Pos = IRPosition::value(V);
349     // Only track dependence if we are going to use the assumed info.
350     const AAValueConstantRange &ValueConstantRangeAA =
351         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
352                                          UseAssumed ? DepClassTy::OPTIONAL
353                                                     : DepClassTy::NONE);
354     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
355                                      : ValueConstantRangeAA.getKnown();
356     if (Range.isFullSet())
357       return false;
358 
359     // We can only use the lower part of the range because the upper part can
360     // be higher than what the value can really be.
361     if (GetMinOffset)
362       ROffset = Range.getSignedMin();
363     else
364       ROffset = Range.getSignedMax();
365     return true;
366   };
367 
368   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
369                                                 /* AllowInvariant */ true,
370                                                 AttributorAnalysis);
371 }
372 
373 static const Value *
374 getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
375                         const Value *Ptr, int64_t &BytesOffset,
376                         const DataLayout &DL, bool AllowNonInbounds = false) {
377   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
378   const Value *Base =
379       stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
380                                 /* GetMinOffset */ true, AllowNonInbounds);
381 
382   BytesOffset = OffsetAPInt.getSExtValue();
383   return Base;
384 }
385 
386 /// Clamp the information known for all returned values of a function
387 /// (identified by \p QueryingAA) into \p S.
388 template <typename AAType, typename StateType = typename AAType::StateType>
389 static void clampReturnedValueStates(
390     Attributor &A, const AAType &QueryingAA, StateType &S,
391     const IRPosition::CallBaseContext *CBContext = nullptr) {
392   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
393                     << QueryingAA << " into " << S << "\n");
394 
395   assert((QueryingAA.getIRPosition().getPositionKind() ==
396               IRPosition::IRP_RETURNED ||
397           QueryingAA.getIRPosition().getPositionKind() ==
398               IRPosition::IRP_CALL_SITE_RETURNED) &&
399          "Can only clamp returned value states for a function returned or call "
400          "site returned position!");
401 
402   // Use an optional state as there might not be any return values and we want
403   // to join (IntegerState::operator&) the state of all there are.
404   Optional<StateType> T;
405 
406   // Callback for each possibly returned value.
407   auto CheckReturnValue = [&](Value &RV) -> bool {
408     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
409     const AAType &AA =
410         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
411     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
412                       << " @ " << RVPos << "\n");
413     const StateType &AAS = AA.getState();
414     if (!T)
415       T = StateType::getBestState(AAS);
416     *T &= AAS;
417     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
418                       << "\n");
419     return T->isValidState();
420   };
421 
422   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
423     S.indicatePessimisticFixpoint();
424   else if (T)
425     S ^= *T;
426 }
427 
428 namespace {
429 /// Helper class for generic deduction: return value -> returned position.
430 template <typename AAType, typename BaseType,
431           typename StateType = typename BaseType::StateType,
432           bool PropagateCallBaseContext = false>
433 struct AAReturnedFromReturnedValues : public BaseType {
434   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
435       : BaseType(IRP, A) {}
436 
437   /// See AbstractAttribute::updateImpl(...).
438   ChangeStatus updateImpl(Attributor &A) override {
439     StateType S(StateType::getBestState(this->getState()));
440     clampReturnedValueStates<AAType, StateType>(
441         A, *this, S,
442         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
443     // TODO: If we know we visited all returned values, thus no are assumed
444     // dead, we can take the known information from the state T.
445     return clampStateAndIndicateChange<StateType>(this->getState(), S);
446   }
447 };
448 
449 /// Clamp the information known at all call sites for a given argument
450 /// (identified by \p QueryingAA) into \p S.
451 template <typename AAType, typename StateType = typename AAType::StateType>
452 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
453                                         StateType &S) {
454   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
455                     << QueryingAA << " into " << S << "\n");
456 
457   assert(QueryingAA.getIRPosition().getPositionKind() ==
458              IRPosition::IRP_ARGUMENT &&
459          "Can only clamp call site argument states for an argument position!");
460 
461   // Use an optional state as there might not be any return values and we want
462   // to join (IntegerState::operator&) the state of all there are.
463   Optional<StateType> T;
464 
465   // The argument number which is also the call site argument number.
466   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
467 
468   auto CallSiteCheck = [&](AbstractCallSite ACS) {
469     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
470     // Check if a coresponding argument was found or if it is on not associated
471     // (which can happen for callback calls).
472     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
473       return false;
474 
475     const AAType &AA =
476         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
477     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
478                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
479     const StateType &AAS = AA.getState();
480     if (!T)
481       T = StateType::getBestState(AAS);
482     *T &= AAS;
483     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
484                       << "\n");
485     return T->isValidState();
486   };
487 
488   bool UsedAssumedInformation = false;
489   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
490                               UsedAssumedInformation))
491     S.indicatePessimisticFixpoint();
492   else if (T)
493     S ^= *T;
494 }
495 
496 /// This function is the bridge between argument position and the call base
497 /// context.
498 template <typename AAType, typename BaseType,
499           typename StateType = typename AAType::StateType>
500 bool getArgumentStateFromCallBaseContext(Attributor &A,
501                                          BaseType &QueryingAttribute,
502                                          IRPosition &Pos, StateType &State) {
503   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
504          "Expected an 'argument' position !");
505   const CallBase *CBContext = Pos.getCallBaseContext();
506   if (!CBContext)
507     return false;
508 
509   int ArgNo = Pos.getCallSiteArgNo();
510   assert(ArgNo >= 0 && "Invalid Arg No!");
511 
512   const auto &AA = A.getAAFor<AAType>(
513       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
514       DepClassTy::REQUIRED);
515   const StateType &CBArgumentState =
516       static_cast<const StateType &>(AA.getState());
517 
518   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
519                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
520                     << "\n");
521 
522   // NOTE: If we want to do call site grouping it should happen here.
523   State ^= CBArgumentState;
524   return true;
525 }
526 
527 /// Helper class for generic deduction: call site argument -> argument position.
528 template <typename AAType, typename BaseType,
529           typename StateType = typename AAType::StateType,
530           bool BridgeCallBaseContext = false>
531 struct AAArgumentFromCallSiteArguments : public BaseType {
532   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
533       : BaseType(IRP, A) {}
534 
535   /// See AbstractAttribute::updateImpl(...).
536   ChangeStatus updateImpl(Attributor &A) override {
537     StateType S = StateType::getBestState(this->getState());
538 
539     if (BridgeCallBaseContext) {
540       bool Success =
541           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
542               A, *this, this->getIRPosition(), S);
543       if (Success)
544         return clampStateAndIndicateChange<StateType>(this->getState(), S);
545     }
546     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
547 
548     // TODO: If we know we visited all incoming values, thus no are assumed
549     // dead, we can take the known information from the state T.
550     return clampStateAndIndicateChange<StateType>(this->getState(), S);
551   }
552 };
553 
554 /// Helper class for generic replication: function returned -> cs returned.
555 template <typename AAType, typename BaseType,
556           typename StateType = typename BaseType::StateType,
557           bool IntroduceCallBaseContext = false>
558 struct AACallSiteReturnedFromReturned : public BaseType {
559   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
560       : BaseType(IRP, A) {}
561 
562   /// See AbstractAttribute::updateImpl(...).
563   ChangeStatus updateImpl(Attributor &A) override {
564     assert(this->getIRPosition().getPositionKind() ==
565                IRPosition::IRP_CALL_SITE_RETURNED &&
566            "Can only wrap function returned positions for call site returned "
567            "positions!");
568     auto &S = this->getState();
569 
570     const Function *AssociatedFunction =
571         this->getIRPosition().getAssociatedFunction();
572     if (!AssociatedFunction)
573       return S.indicatePessimisticFixpoint();
574 
575     CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
576     if (IntroduceCallBaseContext)
577       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
578                         << CBContext << "\n");
579 
580     IRPosition FnPos = IRPosition::returned(
581         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
582     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
583     return clampStateAndIndicateChange(S, AA.getState());
584   }
585 };
586 
587 /// Helper function to accumulate uses.
588 template <class AAType, typename StateType = typename AAType::StateType>
589 static void followUsesInContext(AAType &AA, Attributor &A,
590                                 MustBeExecutedContextExplorer &Explorer,
591                                 const Instruction *CtxI,
592                                 SetVector<const Use *> &Uses,
593                                 StateType &State) {
594   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
595   for (unsigned u = 0; u < Uses.size(); ++u) {
596     const Use *U = Uses[u];
597     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
598       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
599       if (Found && AA.followUseInMBEC(A, U, UserI, State))
600         for (const Use &Us : UserI->uses())
601           Uses.insert(&Us);
602     }
603   }
604 }
605 
606 /// Use the must-be-executed-context around \p I to add information into \p S.
607 /// The AAType class is required to have `followUseInMBEC` method with the
608 /// following signature and behaviour:
609 ///
610 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
611 /// U - Underlying use.
612 /// I - The user of the \p U.
613 /// Returns true if the value should be tracked transitively.
614 ///
615 template <class AAType, typename StateType = typename AAType::StateType>
616 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
617                              Instruction &CtxI) {
618 
619   // Container for (transitive) uses of the associated value.
620   SetVector<const Use *> Uses;
621   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
622     Uses.insert(&U);
623 
624   MustBeExecutedContextExplorer &Explorer =
625       A.getInfoCache().getMustBeExecutedContextExplorer();
626 
627   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
628 
629   if (S.isAtFixpoint())
630     return;
631 
632   SmallVector<const BranchInst *, 4> BrInsts;
633   auto Pred = [&](const Instruction *I) {
634     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
635       if (Br->isConditional())
636         BrInsts.push_back(Br);
637     return true;
638   };
639 
640   // Here, accumulate conditional branch instructions in the context. We
641   // explore the child paths and collect the known states. The disjunction of
642   // those states can be merged to its own state. Let ParentState_i be a state
643   // to indicate the known information for an i-th branch instruction in the
644   // context. ChildStates are created for its successors respectively.
645   //
646   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
647   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
648   //      ...
649   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
650   //
651   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
652   //
653   // FIXME: Currently, recursive branches are not handled. For example, we
654   // can't deduce that ptr must be dereferenced in below function.
655   //
656   // void f(int a, int c, int *ptr) {
657   //    if(a)
658   //      if (b) {
659   //        *ptr = 0;
660   //      } else {
661   //        *ptr = 1;
662   //      }
663   //    else {
664   //      if (b) {
665   //        *ptr = 0;
666   //      } else {
667   //        *ptr = 1;
668   //      }
669   //    }
670   // }
671 
672   Explorer.checkForAllContext(&CtxI, Pred);
673   for (const BranchInst *Br : BrInsts) {
674     StateType ParentState;
675 
676     // The known state of the parent state is a conjunction of children's
677     // known states so it is initialized with a best state.
678     ParentState.indicateOptimisticFixpoint();
679 
680     for (const BasicBlock *BB : Br->successors()) {
681       StateType ChildState;
682 
683       size_t BeforeSize = Uses.size();
684       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
685 
686       // Erase uses which only appear in the child.
687       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
688         It = Uses.erase(It);
689 
690       ParentState &= ChildState;
691     }
692 
693     // Use only known state.
694     S += ParentState;
695   }
696 }
697 } // namespace
698 
699 /// ------------------------ PointerInfo ---------------------------------------
700 
701 namespace llvm {
702 namespace AA {
703 namespace PointerInfo {
704 
705 struct State;
706 
707 } // namespace PointerInfo
708 } // namespace AA
709 
710 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
711 template <>
712 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
713   using Access = AAPointerInfo::Access;
714   static inline Access getEmptyKey();
715   static inline Access getTombstoneKey();
716   static unsigned getHashValue(const Access &A);
717   static bool isEqual(const Access &LHS, const Access &RHS);
718 };
719 
720 /// Helper that allows OffsetAndSize as a key in a DenseMap.
721 template <>
722 struct DenseMapInfo<AAPointerInfo ::OffsetAndSize>
723     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
724 
725 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
726 /// but the instruction
727 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
728   using Base = DenseMapInfo<Instruction *>;
729   using Access = AAPointerInfo::Access;
730   static inline Access getEmptyKey();
731   static inline Access getTombstoneKey();
732   static unsigned getHashValue(const Access &A);
733   static bool isEqual(const Access &LHS, const Access &RHS);
734 };
735 
736 } // namespace llvm
737 
738 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
739 struct AA::PointerInfo::State : public AbstractState {
740 
741   ~State() {
742     // We do not delete the Accesses objects but need to destroy them still.
743     for (auto &It : AccessBins)
744       It.second->~Accesses();
745   }
746 
747   /// Return the best possible representable state.
748   static State getBestState(const State &SIS) { return State(); }
749 
750   /// Return the worst possible representable state.
751   static State getWorstState(const State &SIS) {
752     State R;
753     R.indicatePessimisticFixpoint();
754     return R;
755   }
756 
757   State() = default;
758   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
759     SIS.AccessBins.clear();
760   }
761 
762   const State &getAssumed() const { return *this; }
763 
764   /// See AbstractState::isValidState().
765   bool isValidState() const override { return BS.isValidState(); }
766 
767   /// See AbstractState::isAtFixpoint().
768   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
769 
770   /// See AbstractState::indicateOptimisticFixpoint().
771   ChangeStatus indicateOptimisticFixpoint() override {
772     BS.indicateOptimisticFixpoint();
773     return ChangeStatus::UNCHANGED;
774   }
775 
776   /// See AbstractState::indicatePessimisticFixpoint().
777   ChangeStatus indicatePessimisticFixpoint() override {
778     BS.indicatePessimisticFixpoint();
779     return ChangeStatus::CHANGED;
780   }
781 
782   State &operator=(const State &R) {
783     if (this == &R)
784       return *this;
785     BS = R.BS;
786     AccessBins = R.AccessBins;
787     return *this;
788   }
789 
790   State &operator=(State &&R) {
791     if (this == &R)
792       return *this;
793     std::swap(BS, R.BS);
794     std::swap(AccessBins, R.AccessBins);
795     return *this;
796   }
797 
798   bool operator==(const State &R) const {
799     if (BS != R.BS)
800       return false;
801     if (AccessBins.size() != R.AccessBins.size())
802       return false;
803     auto It = begin(), RIt = R.begin(), E = end();
804     while (It != E) {
805       if (It->getFirst() != RIt->getFirst())
806         return false;
807       auto &Accs = It->getSecond();
808       auto &RAccs = RIt->getSecond();
809       if (Accs->size() != RAccs->size())
810         return false;
811       for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
812         if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
813           return false;
814       ++It;
815       ++RIt;
816     }
817     return true;
818   }
819   bool operator!=(const State &R) const { return !(*this == R); }
820 
821   /// We store accesses in a set with the instruction as key.
822   struct Accesses {
823     SmallVector<AAPointerInfo::Access, 4> Accesses;
824     DenseMap<const Instruction *, unsigned> Map;
825 
826     unsigned size() const { return Accesses.size(); }
827 
828     using vec_iterator = decltype(Accesses)::iterator;
829     vec_iterator begin() { return Accesses.begin(); }
830     vec_iterator end() { return Accesses.end(); }
831 
832     using iterator = decltype(Map)::const_iterator;
833     iterator find(AAPointerInfo::Access &Acc) {
834       return Map.find(Acc.getRemoteInst());
835     }
836     iterator find_end() { return Map.end(); }
837 
838     AAPointerInfo::Access &get(iterator &It) {
839       return Accesses[It->getSecond()];
840     }
841 
842     void insert(AAPointerInfo::Access &Acc) {
843       Map[Acc.getRemoteInst()] = Accesses.size();
844       Accesses.push_back(Acc);
845     }
846   };
847 
848   /// We store all accesses in bins denoted by their offset and size.
849   using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;
850 
851   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
852   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
853 
854 protected:
855   /// The bins with all the accesses for the associated pointer.
856   AccessBinsTy AccessBins;
857 
858   /// Add a new access to the state at offset \p Offset and with size \p Size.
859   /// The access is associated with \p I, writes \p Content (if anything), and
860   /// is of kind \p Kind.
861   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
862   ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
863                          Instruction &I, Optional<Value *> Content,
864                          AAPointerInfo::AccessKind Kind, Type *Ty,
865                          Instruction *RemoteI = nullptr,
866                          Accesses *BinPtr = nullptr) {
867     AAPointerInfo::OffsetAndSize Key{Offset, Size};
868     Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
869     if (!Bin)
870       Bin = new (A.Allocator) Accesses;
871     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
872     // Check if we have an access for this instruction in this bin, if not,
873     // simply add it.
874     auto It = Bin->find(Acc);
875     if (It == Bin->find_end()) {
876       Bin->insert(Acc);
877       return ChangeStatus::CHANGED;
878     }
879     // If the existing access is the same as then new one, nothing changed.
880     AAPointerInfo::Access &Current = Bin->get(It);
881     AAPointerInfo::Access Before = Current;
882     // The new one will be combined with the existing one.
883     Current &= Acc;
884     return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
885   }
886 
887   /// See AAPointerInfo::forallInterferingAccesses.
888   bool forallInterferingAccesses(
889       AAPointerInfo::OffsetAndSize OAS,
890       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
891     if (!isValidState())
892       return false;
893 
894     for (auto &It : AccessBins) {
895       AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
896       if (!OAS.mayOverlap(ItOAS))
897         continue;
898       bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
899       for (auto &Access : *It.getSecond())
900         if (!CB(Access, IsExact))
901           return false;
902     }
903     return true;
904   }
905 
906   /// See AAPointerInfo::forallInterferingAccesses.
907   bool forallInterferingAccesses(
908       Instruction &I,
909       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
910     if (!isValidState())
911       return false;
912 
913     // First find the offset and size of I.
914     AAPointerInfo::OffsetAndSize OAS(-1, -1);
915     for (auto &It : AccessBins) {
916       for (auto &Access : *It.getSecond()) {
917         if (Access.getRemoteInst() == &I) {
918           OAS = It.getFirst();
919           break;
920         }
921       }
922       if (OAS.getSize() != -1)
923         break;
924     }
925     // No access for I was found, we are done.
926     if (OAS.getSize() == -1)
927       return true;
928 
929     // Now that we have an offset and size, find all overlapping ones and use
930     // the callback on the accesses.
931     return forallInterferingAccesses(OAS, CB);
932   }
933 
934 private:
935   /// State to track fixpoint and validity.
936   BooleanState BS;
937 };
938 
939 namespace {
940 struct AAPointerInfoImpl
941     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
942   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
943   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
944 
945   /// See AbstractAttribute::initialize(...).
946   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
947 
948   /// See AbstractAttribute::getAsStr().
949   const std::string getAsStr() const override {
950     return std::string("PointerInfo ") +
951            (isValidState() ? (std::string("#") +
952                               std::to_string(AccessBins.size()) + " bins")
953                            : "<invalid>");
954   }
955 
956   /// See AbstractAttribute::manifest(...).
957   ChangeStatus manifest(Attributor &A) override {
958     return AAPointerInfo::manifest(A);
959   }
960 
961   bool forallInterferingAccesses(
962       OffsetAndSize OAS,
963       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
964       const override {
965     return State::forallInterferingAccesses(OAS, CB);
966   }
967   bool forallInterferingAccesses(
968       Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
969       function_ref<bool(const Access &, bool)> UserCB) const override {
970     SmallPtrSet<const Access *, 8> DominatingWrites;
971     SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;
972 
973     Function &Scope = *I.getFunction();
974     const auto &NoSyncAA = A.getAAFor<AANoSync>(
975         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
976     const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
977         IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
978     const bool NoSync = NoSyncAA.isAssumedNoSync();
979 
980     // Helper to determine if we need to consider threading, which we cannot
981     // right now. However, if the function is (assumed) nosync or the thread
982     // executing all instructions is the main thread only we can ignore
983     // threading.
984     auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
985       if (NoSync)
986         return true;
987       if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
988         return true;
989       return false;
990     };
991 
992     // Helper to determine if the access is executed by the same thread as the
993     // load, for now it is sufficient to avoid any potential threading effects
994     // as we cannot deal with them anyway.
995     auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
996       return CanIgnoreThreading(*Acc.getLocalInst());
997     };
998 
999     // TODO: Use inter-procedural reachability and dominance.
1000     const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1001         QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
1002 
1003     const bool FindInterferingWrites = I.mayReadFromMemory();
1004     const bool FindInterferingReads = I.mayWriteToMemory();
1005     const bool UseDominanceReasoning = FindInterferingWrites;
1006     const bool CanUseCFGResoning = CanIgnoreThreading(I);
1007     InformationCache &InfoCache = A.getInfoCache();
1008     const DominatorTree *DT =
1009         NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
1010             ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
1011                   Scope)
1012             : nullptr;
1013 
1014     enum GPUAddressSpace : unsigned {
1015       Generic = 0,
1016       Global = 1,
1017       Shared = 3,
1018       Constant = 4,
1019       Local = 5,
1020     };
1021 
1022     // Helper to check if a value has "kernel lifetime", that is it will not
1023     // outlive a GPU kernel. This is true for shared, constant, and local
1024     // globals on AMD and NVIDIA GPUs.
1025     auto HasKernelLifetime = [&](Value *V, Module &M) {
1026       Triple T(M.getTargetTriple());
1027       if (!(T.isAMDGPU() || T.isNVPTX()))
1028         return false;
1029       switch (V->getType()->getPointerAddressSpace()) {
1030       case GPUAddressSpace::Shared:
1031       case GPUAddressSpace::Constant:
1032       case GPUAddressSpace::Local:
1033         return true;
1034       default:
1035         return false;
1036       };
1037     };
1038 
1039     // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
1040     // to determine if we should look at reachability from the callee. For
1041     // certain pointers we know the lifetime and we do not have to step into the
1042     // callee to determine reachability as the pointer would be dead in the
1043     // callee. See the conditional initialization below.
1044     std::function<bool(const Function &)> IsLiveInCalleeCB;
1045 
1046     if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
1047       // If the alloca containing function is not recursive the alloca
1048       // must be dead in the callee.
1049       const Function *AIFn = AI->getFunction();
1050       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1051           *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
1052       if (NoRecurseAA.isAssumedNoRecurse()) {
1053         IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
1054       }
1055     } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
1056       // If the global has kernel lifetime we can stop if we reach a kernel
1057       // as it is "dead" in the (unknown) callees.
1058       if (HasKernelLifetime(GV, *GV->getParent()))
1059         IsLiveInCalleeCB = [](const Function &Fn) {
1060           return !Fn.hasFnAttribute("kernel");
1061         };
1062     }
1063 
1064     auto AccessCB = [&](const Access &Acc, bool Exact) {
1065       if ((!FindInterferingWrites || !Acc.isWrite()) &&
1066           (!FindInterferingReads || !Acc.isRead()))
1067         return true;
1068 
1069       // For now we only filter accesses based on CFG reasoning which does not
1070       // work yet if we have threading effects, or the access is complicated.
1071       if (CanUseCFGResoning) {
1072         if ((!Acc.isWrite() ||
1073              !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
1074                                          IsLiveInCalleeCB)) &&
1075             (!Acc.isRead() ||
1076              !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
1077                                          IsLiveInCalleeCB)))
1078           return true;
1079         if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
1080             IsSameThreadAsLoad(Acc)) {
1081           if (DT->dominates(Acc.getLocalInst(), &I))
1082             DominatingWrites.insert(&Acc);
1083         }
1084       }
1085 
1086       InterferingAccesses.push_back({&Acc, Exact});
1087       return true;
1088     };
1089     if (!State::forallInterferingAccesses(I, AccessCB))
1090       return false;
1091 
1092     // If we cannot use CFG reasoning we only filter the non-write accesses
1093     // and are done here.
1094     if (!CanUseCFGResoning) {
1095       for (auto &It : InterferingAccesses)
1096         if (!UserCB(*It.first, It.second))
1097           return false;
1098       return true;
1099     }
1100 
1101     // Helper to determine if we can skip a specific write access. This is in
1102     // the worst case quadratic as we are looking for another write that will
1103     // hide the effect of this one.
1104     auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
1105       if (!IsSameThreadAsLoad(Acc))
1106         return false;
1107       if (!DominatingWrites.count(&Acc))
1108         return false;
1109       for (const Access *DomAcc : DominatingWrites) {
1110         assert(Acc.getLocalInst()->getFunction() ==
1111                    DomAcc->getLocalInst()->getFunction() &&
1112                "Expected dominating writes to be in the same function!");
1113 
1114         if (DomAcc != &Acc &&
1115             DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
1116           return true;
1117         }
1118       }
1119       return false;
1120     };
1121 
1122     // Run the user callback on all accesses we cannot skip and return if that
1123     // succeeded for all or not.
1124     unsigned NumInterferingAccesses = InterferingAccesses.size();
1125     for (auto &It : InterferingAccesses) {
1126       if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
1127           !CanSkipAccess(*It.first, It.second)) {
1128         if (!UserCB(*It.first, It.second))
1129           return false;
1130       }
1131     }
1132     return true;
1133   }
1134 
1135   ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
1136                                     int64_t Offset, CallBase &CB,
1137                                     bool FromCallee = false) {
1138     using namespace AA::PointerInfo;
1139     if (!OtherAA.getState().isValidState() || !isValidState())
1140       return indicatePessimisticFixpoint();
1141 
1142     const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
1143     bool IsByval =
1144         FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();
1145 
1146     // Combine the accesses bin by bin.
1147     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1148     for (auto &It : OtherAAImpl.getState()) {
1149       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1150       if (Offset != OffsetAndSize::Unknown)
1151         OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
1152       Accesses *Bin = AccessBins.lookup(OAS);
1153       for (const AAPointerInfo::Access &RAcc : *It.second) {
1154         if (IsByval && !RAcc.isRead())
1155           continue;
1156         bool UsedAssumedInformation = false;
1157         AccessKind AK = RAcc.getKind();
1158         Optional<Value *> Content = RAcc.getContent();
1159         if (FromCallee) {
1160           Content = A.translateArgumentToCallSiteContent(
1161               RAcc.getContent(), CB, *this, UsedAssumedInformation);
1162           AK = AccessKind(
1163               AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
1164         }
1165         Changed =
1166             Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
1167                                 AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
1168       }
1169     }
1170     return Changed;
1171   }
1172 
1173   /// Statistic tracking for all AAPointerInfo implementations.
1174   /// See AbstractAttribute::trackStatistics().
1175   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1176 
1177   /// Dump the state into \p O.
1178   void dumpState(raw_ostream &O) {
1179     for (auto &It : AccessBins) {
1180       O << "[" << It.first.getOffset() << "-"
1181         << It.first.getOffset() + It.first.getSize()
1182         << "] : " << It.getSecond()->size() << "\n";
1183       for (auto &Acc : *It.getSecond()) {
1184         O << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst() << "\n";
1185         if (Acc.getLocalInst() != Acc.getRemoteInst())
1186           O << "     -->                         " << *Acc.getRemoteInst()
1187             << "\n";
1188         if (!Acc.isWrittenValueYetUndetermined()) {
1189           if (Acc.getWrittenValue())
1190             O << "       - c: " << *Acc.getWrittenValue() << "\n";
1191           else
1192             O << "       - c: <unknown>\n";
1193         }
1194       }
1195     }
1196   }
1197 };
1198 
1199 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1200   using AccessKind = AAPointerInfo::AccessKind;
1201   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1202       : AAPointerInfoImpl(IRP, A) {}
1203 
1204   /// See AbstractAttribute::initialize(...).
1205   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1206 
1207   /// Deal with an access and signal if it was handled successfully.
1208   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1209                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1210                     ChangeStatus &Changed, Type *Ty,
1211                     int64_t Size = OffsetAndSize::Unknown) {
1212     using namespace AA::PointerInfo;
1213     // No need to find a size if one is given or the offset is unknown.
1214     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1215         Ty) {
1216       const DataLayout &DL = A.getDataLayout();
1217       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1218       if (!AccessSize.isScalable())
1219         Size = AccessSize.getFixedSize();
1220     }
1221     Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
1222     return true;
1223   };
1224 
1225   /// Helper struct, will support ranges eventually.
1226   struct OffsetInfo {
1227     int64_t Offset = OffsetAndSize::Unknown;
1228 
1229     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1230   };
1231 
1232   /// See AbstractAttribute::updateImpl(...).
1233   ChangeStatus updateImpl(Attributor &A) override {
1234     using namespace AA::PointerInfo;
1235     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1236     Value &AssociatedValue = getAssociatedValue();
1237 
1238     const DataLayout &DL = A.getDataLayout();
1239     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1240     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1241 
1242     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1243                                      bool &Follow) {
1244       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1245       UsrOI = PtrOI;
1246       Follow = true;
1247       return true;
1248     };
1249 
1250     const auto *TLI = getAnchorScope()
1251                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1252                                 *getAnchorScope())
1253                           : nullptr;
1254     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1255       Value *CurPtr = U.get();
1256       User *Usr = U.getUser();
1257       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1258                         << *Usr << "\n");
1259       assert(OffsetInfoMap.count(CurPtr) &&
1260              "The current pointer offset should have been seeded!");
1261 
1262       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1263         if (CE->isCast())
1264           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1265         if (CE->isCompare())
1266           return true;
1267         if (!isa<GEPOperator>(CE)) {
1268           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1269                             << "\n");
1270           return false;
1271         }
1272       }
1273       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1274         // Note the order here, the Usr access might change the map, CurPtr is
1275         // already in it though.
1276         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1277         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1278         UsrOI = PtrOI;
1279 
1280         // TODO: Use range information.
1281         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1282             !GEP->hasAllConstantIndices()) {
1283           UsrOI.Offset = OffsetAndSize::Unknown;
1284           Follow = true;
1285           return true;
1286         }
1287 
1288         SmallVector<Value *, 8> Indices;
1289         for (Use &Idx : GEP->indices()) {
1290           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1291             Indices.push_back(CIdx);
1292             continue;
1293           }
1294 
1295           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1296                             << " : " << *Idx << "\n");
1297           return false;
1298         }
1299         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1300                                           GEP->getSourceElementType(), Indices);
1301         Follow = true;
1302         return true;
1303       }
1304       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr) || isa<ReturnInst>(Usr))
1305         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1306 
1307       // For PHIs we need to take care of the recurrence explicitly as the value
1308       // might change while we iterate through a loop. For now, we give up if
1309       // the PHI is not invariant.
1310       if (isa<PHINode>(Usr)) {
1311         // Note the order here, the Usr access might change the map, CurPtr is
1312         // already in it though.
1313         bool IsFirstPHIUser = !OffsetInfoMap.count(Usr);
1314         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1315         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1316         // Check if the PHI is invariant (so far).
1317         if (UsrOI == PtrOI)
1318           return true;
1319 
1320         // Check if the PHI operand has already an unknown offset as we can't
1321         // improve on that anymore.
1322         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1323           UsrOI = PtrOI;
1324           Follow = true;
1325           return true;
1326         }
1327 
1328         // Check if the PHI operand is not dependent on the PHI itself.
1329         APInt Offset(
1330             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1331             0);
1332         Value *CurPtrBase = CurPtr->stripAndAccumulateConstantOffsets(
1333             DL, Offset, /* AllowNonInbounds */ true);
1334         auto It = OffsetInfoMap.find(CurPtrBase);
1335         if (It != OffsetInfoMap.end()) {
1336           Offset += It->getSecond().Offset;
1337           if (IsFirstPHIUser || Offset == UsrOI.Offset)
1338             return HandlePassthroughUser(Usr, PtrOI, Follow);
1339           LLVM_DEBUG(dbgs()
1340                      << "[AAPointerInfo] PHI operand pointer offset mismatch "
1341                      << *CurPtr << " in " << *Usr << "\n");
1342         } else {
1343           LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1344                             << *CurPtr << " in " << *Usr << "\n");
1345         }
1346 
1347         // TODO: Approximate in case we know the direction of the recurrence.
1348         UsrOI = PtrOI;
1349         UsrOI.Offset = OffsetAndSize::Unknown;
1350         Follow = true;
1351         return true;
1352       }
1353 
1354       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1355         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1356                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1357                             Changed, LoadI->getType());
1358       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1359         if (StoreI->getValueOperand() == CurPtr) {
1360           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1361                             << *StoreI << "\n");
1362           return false;
1363         }
1364         bool UsedAssumedInformation = false;
1365         Optional<Value *> Content =
1366             A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
1367                                    UsedAssumedInformation, AA::Interprocedural);
1368         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1369                             OffsetInfoMap[CurPtr].Offset, Changed,
1370                             StoreI->getValueOperand()->getType());
1371       }
1372       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1373         if (CB->isLifetimeStartOrEnd())
1374           return true;
1375         if (getFreedOperand(CB, TLI) == U)
1376           return true;
1377         if (CB->isArgOperand(&U)) {
1378           unsigned ArgNo = CB->getArgOperandNo(&U);
1379           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1380               *this, IRPosition::callsite_argument(*CB, ArgNo),
1381               DepClassTy::REQUIRED);
1382           Changed = translateAndAddState(A, CSArgPI,
1383                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1384                     Changed;
1385           return isValidState();
1386         }
1387         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1388                           << "\n");
1389         // TODO: Allow some call uses
1390         return false;
1391       }
1392 
1393       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1394       return false;
1395     };
1396     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1397       if (OffsetInfoMap.count(NewU)) {
1398         LLVM_DEBUG({
1399           if (!(OffsetInfoMap[NewU] == OffsetInfoMap[OldU])) {
1400             dbgs() << "[AAPointerInfo] Equivalent use callback failed: "
1401                    << OffsetInfoMap[NewU].Offset << " vs "
1402                    << OffsetInfoMap[OldU].Offset << "\n";
1403           }
1404         });
1405         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1406       }
1407       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1408       return true;
1409     };
1410     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1411                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1412                            /* IgnoreDroppableUses */ true, EquivalentUseCB)) {
1413       LLVM_DEBUG(
1414           dbgs() << "[AAPointerInfo] Check for all uses failed, abort!\n");
1415       return indicatePessimisticFixpoint();
1416     }
1417 
1418     LLVM_DEBUG({
1419       dbgs() << "Accesses by bin after update:\n";
1420       dumpState(dbgs());
1421     });
1422 
1423     return Changed;
1424   }
1425 
1426   /// See AbstractAttribute::trackStatistics()
1427   void trackStatistics() const override {
1428     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1429   }
1430 };
1431 
1432 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1433   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1434       : AAPointerInfoImpl(IRP, A) {}
1435 
1436   /// See AbstractAttribute::updateImpl(...).
1437   ChangeStatus updateImpl(Attributor &A) override {
1438     return indicatePessimisticFixpoint();
1439   }
1440 
1441   /// See AbstractAttribute::trackStatistics()
1442   void trackStatistics() const override {
1443     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1444   }
1445 };
1446 
1447 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1448   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1449       : AAPointerInfoFloating(IRP, A) {}
1450 
1451   /// See AbstractAttribute::initialize(...).
1452   void initialize(Attributor &A) override {
1453     AAPointerInfoFloating::initialize(A);
1454     if (getAnchorScope()->isDeclaration())
1455       indicatePessimisticFixpoint();
1456   }
1457 
1458   /// See AbstractAttribute::trackStatistics()
1459   void trackStatistics() const override {
1460     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1461   }
1462 };
1463 
1464 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1465   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1466       : AAPointerInfoFloating(IRP, A) {}
1467 
1468   /// See AbstractAttribute::updateImpl(...).
1469   ChangeStatus updateImpl(Attributor &A) override {
1470     using namespace AA::PointerInfo;
1471     // We handle memory intrinsics explicitly, at least the first (=
1472     // destination) and second (=source) arguments as we know how they are
1473     // accessed.
1474     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1475       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1476       int64_t LengthVal = OffsetAndSize::Unknown;
1477       if (Length)
1478         LengthVal = Length->getSExtValue();
1479       Value &Ptr = getAssociatedValue();
1480       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1481       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1482       if (ArgNo == 0) {
1483         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1484                      nullptr, LengthVal);
1485       } else if (ArgNo == 1) {
1486         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1487                      nullptr, LengthVal);
1488       } else {
1489         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1490                           << *MI << "\n");
1491         return indicatePessimisticFixpoint();
1492       }
1493 
1494       LLVM_DEBUG({
1495         dbgs() << "Accesses by bin after update:\n";
1496         dumpState(dbgs());
1497       });
1498 
1499       return Changed;
1500     }
1501 
1502     // TODO: Once we have call site specific value information we can provide
1503     //       call site specific liveness information and then it makes
1504     //       sense to specialize attributes for call sites arguments instead of
1505     //       redirecting requests to the callee argument.
1506     Argument *Arg = getAssociatedArgument();
1507     if (!Arg)
1508       return indicatePessimisticFixpoint();
1509     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1510     auto &ArgAA =
1511         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1512     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1513                                 /* FromCallee */ true);
1514   }
1515 
1516   /// See AbstractAttribute::trackStatistics()
1517   void trackStatistics() const override {
1518     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1519   }
1520 };
1521 
1522 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1523   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1524       : AAPointerInfoFloating(IRP, A) {}
1525 
1526   /// See AbstractAttribute::trackStatistics()
1527   void trackStatistics() const override {
1528     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1529   }
1530 };
1531 } // namespace
1532 
1533 /// -----------------------NoUnwind Function Attribute--------------------------
1534 
1535 namespace {
1536 struct AANoUnwindImpl : AANoUnwind {
1537   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1538 
1539   const std::string getAsStr() const override {
1540     return getAssumed() ? "nounwind" : "may-unwind";
1541   }
1542 
1543   /// See AbstractAttribute::updateImpl(...).
1544   ChangeStatus updateImpl(Attributor &A) override {
1545     auto Opcodes = {
1546         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1547         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1548         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1549 
1550     auto CheckForNoUnwind = [&](Instruction &I) {
1551       if (!I.mayThrow())
1552         return true;
1553 
1554       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1555         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1556             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1557         return NoUnwindAA.isAssumedNoUnwind();
1558       }
1559       return false;
1560     };
1561 
1562     bool UsedAssumedInformation = false;
1563     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1564                                    UsedAssumedInformation))
1565       return indicatePessimisticFixpoint();
1566 
1567     return ChangeStatus::UNCHANGED;
1568   }
1569 };
1570 
1571 struct AANoUnwindFunction final : public AANoUnwindImpl {
1572   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1573       : AANoUnwindImpl(IRP, A) {}
1574 
1575   /// See AbstractAttribute::trackStatistics()
1576   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1577 };
1578 
1579 /// NoUnwind attribute deduction for a call sites.
1580 struct AANoUnwindCallSite final : AANoUnwindImpl {
1581   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1582       : AANoUnwindImpl(IRP, A) {}
1583 
1584   /// See AbstractAttribute::initialize(...).
1585   void initialize(Attributor &A) override {
1586     AANoUnwindImpl::initialize(A);
1587     Function *F = getAssociatedFunction();
1588     if (!F || F->isDeclaration())
1589       indicatePessimisticFixpoint();
1590   }
1591 
1592   /// See AbstractAttribute::updateImpl(...).
1593   ChangeStatus updateImpl(Attributor &A) override {
1594     // TODO: Once we have call site specific value information we can provide
1595     //       call site specific liveness information and then it makes
1596     //       sense to specialize attributes for call sites arguments instead of
1597     //       redirecting requests to the callee argument.
1598     Function *F = getAssociatedFunction();
1599     const IRPosition &FnPos = IRPosition::function(*F);
1600     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1601     return clampStateAndIndicateChange(getState(), FnAA.getState());
1602   }
1603 
1604   /// See AbstractAttribute::trackStatistics()
1605   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1606 };
1607 } // namespace
1608 
1609 /// --------------------- Function Return Values -------------------------------
1610 
1611 namespace {
1612 /// "Attribute" that collects all potential returned values and the return
1613 /// instructions that they arise from.
1614 ///
1615 /// If there is a unique returned value R, the manifest method will:
1616 ///   - mark R with the "returned" attribute, if R is an argument.
1617 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1618 
1619   /// Mapping of values potentially returned by the associated function to the
1620   /// return instructions that might return them.
1621   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1622 
1623   /// State flags
1624   ///
1625   ///{
1626   bool IsFixed = false;
1627   bool IsValidState = true;
1628   ///}
1629 
1630 public:
1631   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1632       : AAReturnedValues(IRP, A) {}
1633 
1634   /// See AbstractAttribute::initialize(...).
1635   void initialize(Attributor &A) override {
1636     // Reset the state.
1637     IsFixed = false;
1638     IsValidState = true;
1639     ReturnedValues.clear();
1640 
1641     Function *F = getAssociatedFunction();
1642     if (!F || F->isDeclaration()) {
1643       indicatePessimisticFixpoint();
1644       return;
1645     }
1646     assert(!F->getReturnType()->isVoidTy() &&
1647            "Did not expect a void return type!");
1648 
1649     // The map from instruction opcodes to those instructions in the function.
1650     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1651 
1652     // Look through all arguments, if one is marked as returned we are done.
1653     for (Argument &Arg : F->args()) {
1654       if (Arg.hasReturnedAttr()) {
1655         auto &ReturnInstSet = ReturnedValues[&Arg];
1656         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1657           for (Instruction *RI : *Insts)
1658             ReturnInstSet.insert(cast<ReturnInst>(RI));
1659 
1660         indicateOptimisticFixpoint();
1661         return;
1662       }
1663     }
1664 
1665     if (!A.isFunctionIPOAmendable(*F))
1666       indicatePessimisticFixpoint();
1667   }
1668 
1669   /// See AbstractAttribute::manifest(...).
1670   ChangeStatus manifest(Attributor &A) override;
1671 
1672   /// See AbstractAttribute::getState(...).
1673   AbstractState &getState() override { return *this; }
1674 
1675   /// See AbstractAttribute::getState(...).
1676   const AbstractState &getState() const override { return *this; }
1677 
1678   /// See AbstractAttribute::updateImpl(Attributor &A).
1679   ChangeStatus updateImpl(Attributor &A) override;
1680 
1681   llvm::iterator_range<iterator> returned_values() override {
1682     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1683   }
1684 
1685   llvm::iterator_range<const_iterator> returned_values() const override {
1686     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1687   }
1688 
1689   /// Return the number of potential return values, -1 if unknown.
1690   size_t getNumReturnValues() const override {
1691     return isValidState() ? ReturnedValues.size() : -1;
1692   }
1693 
1694   /// Return an assumed unique return value if a single candidate is found. If
1695   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1696   /// Optional::NoneType.
1697   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1698 
1699   /// See AbstractState::checkForAllReturnedValues(...).
1700   bool checkForAllReturnedValuesAndReturnInsts(
1701       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1702       const override;
1703 
1704   /// Pretty print the attribute similar to the IR representation.
1705   const std::string getAsStr() const override;
1706 
1707   /// See AbstractState::isAtFixpoint().
1708   bool isAtFixpoint() const override { return IsFixed; }
1709 
1710   /// See AbstractState::isValidState().
1711   bool isValidState() const override { return IsValidState; }
1712 
1713   /// See AbstractState::indicateOptimisticFixpoint(...).
1714   ChangeStatus indicateOptimisticFixpoint() override {
1715     IsFixed = true;
1716     return ChangeStatus::UNCHANGED;
1717   }
1718 
1719   ChangeStatus indicatePessimisticFixpoint() override {
1720     IsFixed = true;
1721     IsValidState = false;
1722     return ChangeStatus::CHANGED;
1723   }
1724 };
1725 
1726 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1727   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1728 
1729   // Bookkeeping.
1730   assert(isValidState());
1731   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1732                   "Number of function with known return values");
1733 
1734   // Check if we have an assumed unique return value that we could manifest.
1735   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1736 
1737   if (!UniqueRV || !UniqueRV.value())
1738     return Changed;
1739 
1740   // Bookkeeping.
1741   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1742                   "Number of function with unique return");
1743   // If the assumed unique return value is an argument, annotate it.
1744   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.value())) {
1745     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1746             getAssociatedFunction()->getReturnType())) {
1747       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1748       Changed = IRAttribute::manifest(A);
1749     }
1750   }
1751   return Changed;
1752 }
1753 
1754 const std::string AAReturnedValuesImpl::getAsStr() const {
1755   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1756          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1757 }
1758 
1759 Optional<Value *>
1760 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1761   // If checkForAllReturnedValues provides a unique value, ignoring potential
1762   // undef values that can also be present, it is assumed to be the actual
1763   // return value and forwarded to the caller of this method. If there are
1764   // multiple, a nullptr is returned indicating there cannot be a unique
1765   // returned value.
1766   Optional<Value *> UniqueRV;
1767   Type *Ty = getAssociatedFunction()->getReturnType();
1768 
1769   auto Pred = [&](Value &RV) -> bool {
1770     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1771     return UniqueRV != Optional<Value *>(nullptr);
1772   };
1773 
1774   if (!A.checkForAllReturnedValues(Pred, *this))
1775     UniqueRV = nullptr;
1776 
1777   return UniqueRV;
1778 }
1779 
1780 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1781     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1782     const {
1783   if (!isValidState())
1784     return false;
1785 
1786   // Check all returned values but ignore call sites as long as we have not
1787   // encountered an overdefined one during an update.
1788   for (auto &It : ReturnedValues) {
1789     Value *RV = It.first;
1790     if (!Pred(*RV, It.second))
1791       return false;
1792   }
1793 
1794   return true;
1795 }
1796 
1797 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1798   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1799 
1800   SmallVector<AA::ValueAndContext> Values;
1801   bool UsedAssumedInformation = false;
1802   auto ReturnInstCB = [&](Instruction &I) {
1803     ReturnInst &Ret = cast<ReturnInst>(I);
1804     Values.clear();
1805     if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()),
1806                                       *this, Values, AA::Intraprocedural,
1807                                       UsedAssumedInformation))
1808       Values.push_back({*Ret.getReturnValue(), Ret});
1809 
1810     for (auto &VAC : Values) {
1811       assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&
1812              "Assumed returned value should be valid in function scope!");
1813       if (ReturnedValues[VAC.getValue()].insert(&Ret))
1814         Changed = ChangeStatus::CHANGED;
1815     }
1816     return true;
1817   };
1818 
1819   // Discover returned values from all live returned instructions in the
1820   // associated function.
1821   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1822                                  UsedAssumedInformation))
1823     return indicatePessimisticFixpoint();
1824   return Changed;
1825 }
1826 
1827 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1828   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1829       : AAReturnedValuesImpl(IRP, A) {}
1830 
1831   /// See AbstractAttribute::trackStatistics()
1832   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1833 };
1834 
1835 /// Returned values information for a call sites.
1836 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1837   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1838       : AAReturnedValuesImpl(IRP, A) {}
1839 
1840   /// See AbstractAttribute::initialize(...).
1841   void initialize(Attributor &A) override {
1842     // TODO: Once we have call site specific value information we can provide
1843     //       call site specific liveness information and then it makes
1844     //       sense to specialize attributes for call sites instead of
1845     //       redirecting requests to the callee.
1846     llvm_unreachable("Abstract attributes for returned values are not "
1847                      "supported for call sites yet!");
1848   }
1849 
1850   /// See AbstractAttribute::updateImpl(...).
1851   ChangeStatus updateImpl(Attributor &A) override {
1852     return indicatePessimisticFixpoint();
1853   }
1854 
1855   /// See AbstractAttribute::trackStatistics()
1856   void trackStatistics() const override {}
1857 };
1858 } // namespace
1859 
1860 /// ------------------------ NoSync Function Attribute -------------------------
1861 
1862 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
1863   if (!I->isAtomic())
1864     return false;
1865 
1866   if (auto *FI = dyn_cast<FenceInst>(I))
1867     // All legal orderings for fence are stronger than monotonic.
1868     return FI->getSyncScopeID() != SyncScope::SingleThread;
1869   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1870     // Unordered is not a legal ordering for cmpxchg.
1871     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1872             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1873   }
1874 
1875   AtomicOrdering Ordering;
1876   switch (I->getOpcode()) {
1877   case Instruction::AtomicRMW:
1878     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1879     break;
1880   case Instruction::Store:
1881     Ordering = cast<StoreInst>(I)->getOrdering();
1882     break;
1883   case Instruction::Load:
1884     Ordering = cast<LoadInst>(I)->getOrdering();
1885     break;
1886   default:
1887     llvm_unreachable(
1888         "New atomic operations need to be known in the attributor.");
1889   }
1890 
1891   return (Ordering != AtomicOrdering::Unordered &&
1892           Ordering != AtomicOrdering::Monotonic);
1893 }
1894 
1895 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1896 /// which would be nosync except that they have a volatile flag.  All other
1897 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1898 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
1899   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1900     return !MI->isVolatile();
1901   return false;
1902 }
1903 
1904 namespace {
1905 struct AANoSyncImpl : AANoSync {
1906   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1907 
1908   const std::string getAsStr() const override {
1909     return getAssumed() ? "nosync" : "may-sync";
1910   }
1911 
1912   /// See AbstractAttribute::updateImpl(...).
1913   ChangeStatus updateImpl(Attributor &A) override;
1914 };
1915 
1916 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1917 
1918   auto CheckRWInstForNoSync = [&](Instruction &I) {
1919     return AA::isNoSyncInst(A, I, *this);
1920   };
1921 
1922   auto CheckForNoSync = [&](Instruction &I) {
1923     // At this point we handled all read/write effects and they are all
1924     // nosync, so they can be skipped.
1925     if (I.mayReadOrWriteMemory())
1926       return true;
1927 
1928     // non-convergent and readnone imply nosync.
1929     return !cast<CallBase>(I).isConvergent();
1930   };
1931 
1932   bool UsedAssumedInformation = false;
1933   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1934                                           UsedAssumedInformation) ||
1935       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1936                                          UsedAssumedInformation))
1937     return indicatePessimisticFixpoint();
1938 
1939   return ChangeStatus::UNCHANGED;
1940 }
1941 
1942 struct AANoSyncFunction final : public AANoSyncImpl {
1943   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1944       : AANoSyncImpl(IRP, A) {}
1945 
1946   /// See AbstractAttribute::trackStatistics()
1947   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1948 };
1949 
1950 /// NoSync attribute deduction for a call sites.
1951 struct AANoSyncCallSite final : AANoSyncImpl {
1952   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1953       : AANoSyncImpl(IRP, A) {}
1954 
1955   /// See AbstractAttribute::initialize(...).
1956   void initialize(Attributor &A) override {
1957     AANoSyncImpl::initialize(A);
1958     Function *F = getAssociatedFunction();
1959     if (!F || F->isDeclaration())
1960       indicatePessimisticFixpoint();
1961   }
1962 
1963   /// See AbstractAttribute::updateImpl(...).
1964   ChangeStatus updateImpl(Attributor &A) override {
1965     // TODO: Once we have call site specific value information we can provide
1966     //       call site specific liveness information and then it makes
1967     //       sense to specialize attributes for call sites arguments instead of
1968     //       redirecting requests to the callee argument.
1969     Function *F = getAssociatedFunction();
1970     const IRPosition &FnPos = IRPosition::function(*F);
1971     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1972     return clampStateAndIndicateChange(getState(), FnAA.getState());
1973   }
1974 
1975   /// See AbstractAttribute::trackStatistics()
1976   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1977 };
1978 } // namespace
1979 
1980 /// ------------------------ No-Free Attributes ----------------------------
1981 
1982 namespace {
1983 struct AANoFreeImpl : public AANoFree {
1984   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1985 
1986   /// See AbstractAttribute::updateImpl(...).
1987   ChangeStatus updateImpl(Attributor &A) override {
1988     auto CheckForNoFree = [&](Instruction &I) {
1989       const auto &CB = cast<CallBase>(I);
1990       if (CB.hasFnAttr(Attribute::NoFree))
1991         return true;
1992 
1993       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1994           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1995       return NoFreeAA.isAssumedNoFree();
1996     };
1997 
1998     bool UsedAssumedInformation = false;
1999     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
2000                                            UsedAssumedInformation))
2001       return indicatePessimisticFixpoint();
2002     return ChangeStatus::UNCHANGED;
2003   }
2004 
2005   /// See AbstractAttribute::getAsStr().
2006   const std::string getAsStr() const override {
2007     return getAssumed() ? "nofree" : "may-free";
2008   }
2009 };
2010 
2011 struct AANoFreeFunction final : public AANoFreeImpl {
2012   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
2013       : AANoFreeImpl(IRP, A) {}
2014 
2015   /// See AbstractAttribute::trackStatistics()
2016   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
2017 };
2018 
2019 /// NoFree attribute deduction for a call sites.
2020 struct AANoFreeCallSite final : AANoFreeImpl {
2021   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2022       : AANoFreeImpl(IRP, A) {}
2023 
2024   /// See AbstractAttribute::initialize(...).
2025   void initialize(Attributor &A) override {
2026     AANoFreeImpl::initialize(A);
2027     Function *F = getAssociatedFunction();
2028     if (!F || F->isDeclaration())
2029       indicatePessimisticFixpoint();
2030   }
2031 
2032   /// See AbstractAttribute::updateImpl(...).
2033   ChangeStatus updateImpl(Attributor &A) override {
2034     // TODO: Once we have call site specific value information we can provide
2035     //       call site specific liveness information and then it makes
2036     //       sense to specialize attributes for call sites arguments instead of
2037     //       redirecting requests to the callee argument.
2038     Function *F = getAssociatedFunction();
2039     const IRPosition &FnPos = IRPosition::function(*F);
2040     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2041     return clampStateAndIndicateChange(getState(), FnAA.getState());
2042   }
2043 
2044   /// See AbstractAttribute::trackStatistics()
2045   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2046 };
2047 
2048 /// NoFree attribute for floating values.
2049 struct AANoFreeFloating : AANoFreeImpl {
2050   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2051       : AANoFreeImpl(IRP, A) {}
2052 
2053   /// See AbstractAttribute::trackStatistics()
2054   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
2055 
2056   /// See Abstract Attribute::updateImpl(...).
2057   ChangeStatus updateImpl(Attributor &A) override {
2058     const IRPosition &IRP = getIRPosition();
2059 
2060     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2061         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2062     if (NoFreeAA.isAssumedNoFree())
2063       return ChangeStatus::UNCHANGED;
2064 
2065     Value &AssociatedValue = getIRPosition().getAssociatedValue();
2066     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2067       Instruction *UserI = cast<Instruction>(U.getUser());
2068       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2069         if (CB->isBundleOperand(&U))
2070           return false;
2071         if (!CB->isArgOperand(&U))
2072           return true;
2073         unsigned ArgNo = CB->getArgOperandNo(&U);
2074 
2075         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2076             *this, IRPosition::callsite_argument(*CB, ArgNo),
2077             DepClassTy::REQUIRED);
2078         return NoFreeArg.isAssumedNoFree();
2079       }
2080 
2081       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2082           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2083         Follow = true;
2084         return true;
2085       }
2086       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2087           isa<ReturnInst>(UserI))
2088         return true;
2089 
2090       // Unknown user.
2091       return false;
2092     };
2093     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2094       return indicatePessimisticFixpoint();
2095 
2096     return ChangeStatus::UNCHANGED;
2097   }
2098 };
2099 
2100 /// NoFree attribute for a call site argument.
2101 struct AANoFreeArgument final : AANoFreeFloating {
2102   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2103       : AANoFreeFloating(IRP, A) {}
2104 
2105   /// See AbstractAttribute::trackStatistics()
2106   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2107 };
2108 
2109 /// NoFree attribute for call site arguments.
2110 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2111   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2112       : AANoFreeFloating(IRP, A) {}
2113 
2114   /// See AbstractAttribute::updateImpl(...).
2115   ChangeStatus updateImpl(Attributor &A) override {
2116     // TODO: Once we have call site specific value information we can provide
2117     //       call site specific liveness information and then it makes
2118     //       sense to specialize attributes for call sites arguments instead of
2119     //       redirecting requests to the callee argument.
2120     Argument *Arg = getAssociatedArgument();
2121     if (!Arg)
2122       return indicatePessimisticFixpoint();
2123     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2124     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2125     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2126   }
2127 
2128   /// See AbstractAttribute::trackStatistics()
2129   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2130 };
2131 
2132 /// NoFree attribute for function return value.
2133 struct AANoFreeReturned final : AANoFreeFloating {
2134   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2135       : AANoFreeFloating(IRP, A) {
2136     llvm_unreachable("NoFree is not applicable to function returns!");
2137   }
2138 
2139   /// See AbstractAttribute::initialize(...).
2140   void initialize(Attributor &A) override {
2141     llvm_unreachable("NoFree is not applicable to function returns!");
2142   }
2143 
2144   /// See AbstractAttribute::updateImpl(...).
2145   ChangeStatus updateImpl(Attributor &A) override {
2146     llvm_unreachable("NoFree is not applicable to function returns!");
2147   }
2148 
2149   /// See AbstractAttribute::trackStatistics()
2150   void trackStatistics() const override {}
2151 };
2152 
2153 /// NoFree attribute deduction for a call site return value.
2154 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2155   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2156       : AANoFreeFloating(IRP, A) {}
2157 
2158   ChangeStatus manifest(Attributor &A) override {
2159     return ChangeStatus::UNCHANGED;
2160   }
2161   /// See AbstractAttribute::trackStatistics()
2162   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2163 };
2164 } // namespace
2165 
2166 /// ------------------------ NonNull Argument Attribute ------------------------
2167 namespace {
2168 static int64_t getKnownNonNullAndDerefBytesForUse(
2169     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2170     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2171   TrackUse = false;
2172 
2173   const Value *UseV = U->get();
2174   if (!UseV->getType()->isPointerTy())
2175     return 0;
2176 
2177   // We need to follow common pointer manipulation uses to the accesses they
2178   // feed into. We can try to be smart to avoid looking through things we do not
2179   // like for now, e.g., non-inbounds GEPs.
2180   if (isa<CastInst>(I)) {
2181     TrackUse = true;
2182     return 0;
2183   }
2184 
2185   if (isa<GetElementPtrInst>(I)) {
2186     TrackUse = true;
2187     return 0;
2188   }
2189 
2190   Type *PtrTy = UseV->getType();
2191   const Function *F = I->getFunction();
2192   bool NullPointerIsDefined =
2193       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2194   const DataLayout &DL = A.getInfoCache().getDL();
2195   if (const auto *CB = dyn_cast<CallBase>(I)) {
2196     if (CB->isBundleOperand(U)) {
2197       if (RetainedKnowledge RK = getKnowledgeFromUse(
2198               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2199         IsNonNull |=
2200             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2201         return RK.ArgValue;
2202       }
2203       return 0;
2204     }
2205 
2206     if (CB->isCallee(U)) {
2207       IsNonNull |= !NullPointerIsDefined;
2208       return 0;
2209     }
2210 
2211     unsigned ArgNo = CB->getArgOperandNo(U);
2212     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2213     // As long as we only use known information there is no need to track
2214     // dependences here.
2215     auto &DerefAA =
2216         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2217     IsNonNull |= DerefAA.isKnownNonNull();
2218     return DerefAA.getKnownDereferenceableBytes();
2219   }
2220 
2221   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2222   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2223     return 0;
2224 
2225   int64_t Offset;
2226   const Value *Base =
2227       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
2228   if (Base && Base == &AssociatedValue) {
2229     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2230     IsNonNull |= !NullPointerIsDefined;
2231     return std::max(int64_t(0), DerefBytes);
2232   }
2233 
2234   /// Corner case when an offset is 0.
2235   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2236                                           /*AllowNonInbounds*/ true);
2237   if (Base && Base == &AssociatedValue && Offset == 0) {
2238     int64_t DerefBytes = Loc->Size.getValue();
2239     IsNonNull |= !NullPointerIsDefined;
2240     return std::max(int64_t(0), DerefBytes);
2241   }
2242 
2243   return 0;
2244 }
2245 
2246 struct AANonNullImpl : AANonNull {
2247   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2248       : AANonNull(IRP, A),
2249         NullIsDefined(NullPointerIsDefined(
2250             getAnchorScope(),
2251             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2252 
2253   /// See AbstractAttribute::initialize(...).
2254   void initialize(Attributor &A) override {
2255     Value &V = *getAssociatedValue().stripPointerCasts();
2256     if (!NullIsDefined &&
2257         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2258                 /* IgnoreSubsumingPositions */ false, &A)) {
2259       indicateOptimisticFixpoint();
2260       return;
2261     }
2262 
2263     if (isa<ConstantPointerNull>(V)) {
2264       indicatePessimisticFixpoint();
2265       return;
2266     }
2267 
2268     AANonNull::initialize(A);
2269 
2270     bool CanBeNull, CanBeFreed;
2271     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2272                                          CanBeFreed)) {
2273       if (!CanBeNull) {
2274         indicateOptimisticFixpoint();
2275         return;
2276       }
2277     }
2278 
2279     if (isa<GlobalValue>(V)) {
2280       indicatePessimisticFixpoint();
2281       return;
2282     }
2283 
2284     if (Instruction *CtxI = getCtxI())
2285       followUsesInMBEC(*this, A, getState(), *CtxI);
2286   }
2287 
2288   /// See followUsesInMBEC
2289   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2290                        AANonNull::StateType &State) {
2291     bool IsNonNull = false;
2292     bool TrackUse = false;
2293     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2294                                        IsNonNull, TrackUse);
2295     State.setKnown(IsNonNull);
2296     return TrackUse;
2297   }
2298 
2299   /// See AbstractAttribute::getAsStr().
2300   const std::string getAsStr() const override {
2301     return getAssumed() ? "nonnull" : "may-null";
2302   }
2303 
2304   /// Flag to determine if the underlying value can be null and still allow
2305   /// valid accesses.
2306   const bool NullIsDefined;
2307 };
2308 
2309 /// NonNull attribute for a floating value.
2310 struct AANonNullFloating : public AANonNullImpl {
2311   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2312       : AANonNullImpl(IRP, A) {}
2313 
2314   /// See AbstractAttribute::updateImpl(...).
2315   ChangeStatus updateImpl(Attributor &A) override {
2316     const DataLayout &DL = A.getDataLayout();
2317 
2318     bool Stripped;
2319     bool UsedAssumedInformation = false;
2320     SmallVector<AA::ValueAndContext> Values;
2321     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2322                                       AA::AnyScope, UsedAssumedInformation)) {
2323       Values.push_back({getAssociatedValue(), getCtxI()});
2324       Stripped = false;
2325     } else {
2326       Stripped = Values.size() != 1 ||
2327                  Values.front().getValue() != &getAssociatedValue();
2328     }
2329 
2330     DominatorTree *DT = nullptr;
2331     AssumptionCache *AC = nullptr;
2332     InformationCache &InfoCache = A.getInfoCache();
2333     if (const Function *Fn = getAnchorScope()) {
2334       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2335       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2336     }
2337 
2338     AANonNull::StateType T;
2339     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
2340       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2341                                              DepClassTy::REQUIRED);
2342       if (!Stripped && this == &AA) {
2343         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2344           T.indicatePessimisticFixpoint();
2345       } else {
2346         // Use abstract attribute information.
2347         const AANonNull::StateType &NS = AA.getState();
2348         T ^= NS;
2349       }
2350       return T.isValidState();
2351     };
2352 
2353     for (const auto &VAC : Values)
2354       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
2355         return indicatePessimisticFixpoint();
2356 
2357     return clampStateAndIndicateChange(getState(), T);
2358   }
2359 
2360   /// See AbstractAttribute::trackStatistics()
2361   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2362 };
2363 
2364 /// NonNull attribute for function return value.
2365 struct AANonNullReturned final
2366     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2367   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2368       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2369 
2370   /// See AbstractAttribute::getAsStr().
2371   const std::string getAsStr() const override {
2372     return getAssumed() ? "nonnull" : "may-null";
2373   }
2374 
2375   /// See AbstractAttribute::trackStatistics()
2376   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2377 };
2378 
2379 /// NonNull attribute for function argument.
2380 struct AANonNullArgument final
2381     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2382   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2383       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2384 
2385   /// See AbstractAttribute::trackStatistics()
2386   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2387 };
2388 
2389 struct AANonNullCallSiteArgument final : AANonNullFloating {
2390   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2391       : AANonNullFloating(IRP, A) {}
2392 
2393   /// See AbstractAttribute::trackStatistics()
2394   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2395 };
2396 
2397 /// NonNull attribute for a call site return position.
2398 struct AANonNullCallSiteReturned final
2399     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2400   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2401       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2402 
2403   /// See AbstractAttribute::trackStatistics()
2404   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2405 };
2406 } // namespace
2407 
2408 /// ------------------------ No-Recurse Attributes ----------------------------
2409 
2410 namespace {
2411 struct AANoRecurseImpl : public AANoRecurse {
2412   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2413 
2414   /// See AbstractAttribute::getAsStr()
2415   const std::string getAsStr() const override {
2416     return getAssumed() ? "norecurse" : "may-recurse";
2417   }
2418 };
2419 
2420 struct AANoRecurseFunction final : AANoRecurseImpl {
2421   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2422       : AANoRecurseImpl(IRP, A) {}
2423 
2424   /// See AbstractAttribute::updateImpl(...).
2425   ChangeStatus updateImpl(Attributor &A) override {
2426 
2427     // If all live call sites are known to be no-recurse, we are as well.
2428     auto CallSitePred = [&](AbstractCallSite ACS) {
2429       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2430           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2431           DepClassTy::NONE);
2432       return NoRecurseAA.isKnownNoRecurse();
2433     };
2434     bool UsedAssumedInformation = false;
2435     if (A.checkForAllCallSites(CallSitePred, *this, true,
2436                                UsedAssumedInformation)) {
2437       // If we know all call sites and all are known no-recurse, we are done.
2438       // If all known call sites, which might not be all that exist, are known
2439       // to be no-recurse, we are not done but we can continue to assume
2440       // no-recurse. If one of the call sites we have not visited will become
2441       // live, another update is triggered.
2442       if (!UsedAssumedInformation)
2443         indicateOptimisticFixpoint();
2444       return ChangeStatus::UNCHANGED;
2445     }
2446 
2447     const AAFunctionReachability &EdgeReachability =
2448         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2449                                            DepClassTy::REQUIRED);
2450     if (EdgeReachability.canReach(A, *getAnchorScope()))
2451       return indicatePessimisticFixpoint();
2452     return ChangeStatus::UNCHANGED;
2453   }
2454 
2455   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2456 };
2457 
2458 /// NoRecurse attribute deduction for a call sites.
2459 struct AANoRecurseCallSite final : AANoRecurseImpl {
2460   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2461       : AANoRecurseImpl(IRP, A) {}
2462 
2463   /// See AbstractAttribute::initialize(...).
2464   void initialize(Attributor &A) override {
2465     AANoRecurseImpl::initialize(A);
2466     Function *F = getAssociatedFunction();
2467     if (!F || F->isDeclaration())
2468       indicatePessimisticFixpoint();
2469   }
2470 
2471   /// See AbstractAttribute::updateImpl(...).
2472   ChangeStatus updateImpl(Attributor &A) override {
2473     // TODO: Once we have call site specific value information we can provide
2474     //       call site specific liveness information and then it makes
2475     //       sense to specialize attributes for call sites arguments instead of
2476     //       redirecting requests to the callee argument.
2477     Function *F = getAssociatedFunction();
2478     const IRPosition &FnPos = IRPosition::function(*F);
2479     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2480     return clampStateAndIndicateChange(getState(), FnAA.getState());
2481   }
2482 
2483   /// See AbstractAttribute::trackStatistics()
2484   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2485 };
2486 } // namespace
2487 
2488 /// -------------------- Undefined-Behavior Attributes ------------------------
2489 
2490 namespace {
2491 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2492   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2493       : AAUndefinedBehavior(IRP, A) {}
2494 
2495   /// See AbstractAttribute::updateImpl(...).
2496   // through a pointer (i.e. also branches etc.)
2497   ChangeStatus updateImpl(Attributor &A) override {
2498     const size_t UBPrevSize = KnownUBInsts.size();
2499     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2500 
2501     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2502       // Lang ref now states volatile store is not UB, let's skip them.
2503       if (I.isVolatile() && I.mayWriteToMemory())
2504         return true;
2505 
2506       // Skip instructions that are already saved.
2507       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2508         return true;
2509 
2510       // If we reach here, we know we have an instruction
2511       // that accesses memory through a pointer operand,
2512       // for which getPointerOperand() should give it to us.
2513       Value *PtrOp =
2514           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2515       assert(PtrOp &&
2516              "Expected pointer operand of memory accessing instruction");
2517 
2518       // Either we stopped and the appropriate action was taken,
2519       // or we got back a simplified value to continue.
2520       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2521       if (!SimplifiedPtrOp || !SimplifiedPtrOp.value())
2522         return true;
2523       const Value *PtrOpVal = SimplifiedPtrOp.value();
2524 
2525       // A memory access through a pointer is considered UB
2526       // only if the pointer has constant null value.
2527       // TODO: Expand it to not only check constant values.
2528       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2529         AssumedNoUBInsts.insert(&I);
2530         return true;
2531       }
2532       const Type *PtrTy = PtrOpVal->getType();
2533 
2534       // Because we only consider instructions inside functions,
2535       // assume that a parent function exists.
2536       const Function *F = I.getFunction();
2537 
2538       // A memory access using constant null pointer is only considered UB
2539       // if null pointer is _not_ defined for the target platform.
2540       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2541         AssumedNoUBInsts.insert(&I);
2542       else
2543         KnownUBInsts.insert(&I);
2544       return true;
2545     };
2546 
2547     auto InspectBrInstForUB = [&](Instruction &I) {
2548       // A conditional branch instruction is considered UB if it has `undef`
2549       // condition.
2550 
2551       // Skip instructions that are already saved.
2552       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2553         return true;
2554 
2555       // We know we have a branch instruction.
2556       auto *BrInst = cast<BranchInst>(&I);
2557 
2558       // Unconditional branches are never considered UB.
2559       if (BrInst->isUnconditional())
2560         return true;
2561 
2562       // Either we stopped and the appropriate action was taken,
2563       // or we got back a simplified value to continue.
2564       Optional<Value *> SimplifiedCond =
2565           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2566       if (!SimplifiedCond || !*SimplifiedCond)
2567         return true;
2568       AssumedNoUBInsts.insert(&I);
2569       return true;
2570     };
2571 
2572     auto InspectCallSiteForUB = [&](Instruction &I) {
2573       // Check whether a callsite always cause UB or not
2574 
2575       // Skip instructions that are already saved.
2576       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2577         return true;
2578 
2579       // Check nonnull and noundef argument attribute violation for each
2580       // callsite.
2581       CallBase &CB = cast<CallBase>(I);
2582       Function *Callee = CB.getCalledFunction();
2583       if (!Callee)
2584         return true;
2585       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2586         // If current argument is known to be simplified to null pointer and the
2587         // corresponding argument position is known to have nonnull attribute,
2588         // the argument is poison. Furthermore, if the argument is poison and
2589         // the position is known to have noundef attriubte, this callsite is
2590         // considered UB.
2591         if (idx >= Callee->arg_size())
2592           break;
2593         Value *ArgVal = CB.getArgOperand(idx);
2594         if (!ArgVal)
2595           continue;
2596         // Here, we handle three cases.
2597         //   (1) Not having a value means it is dead. (we can replace the value
2598         //       with undef)
2599         //   (2) Simplified to undef. The argument violate noundef attriubte.
2600         //   (3) Simplified to null pointer where known to be nonnull.
2601         //       The argument is a poison value and violate noundef attribute.
2602         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2603         auto &NoUndefAA =
2604             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2605         if (!NoUndefAA.isKnownNoUndef())
2606           continue;
2607         bool UsedAssumedInformation = false;
2608         Optional<Value *> SimplifiedVal =
2609             A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
2610                                    UsedAssumedInformation, AA::Interprocedural);
2611         if (UsedAssumedInformation)
2612           continue;
2613         if (SimplifiedVal && !SimplifiedVal.value())
2614           return true;
2615         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.value())) {
2616           KnownUBInsts.insert(&I);
2617           continue;
2618         }
2619         if (!ArgVal->getType()->isPointerTy() ||
2620             !isa<ConstantPointerNull>(*SimplifiedVal.value()))
2621           continue;
2622         auto &NonNullAA =
2623             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2624         if (NonNullAA.isKnownNonNull())
2625           KnownUBInsts.insert(&I);
2626       }
2627       return true;
2628     };
2629 
2630     auto InspectReturnInstForUB = [&](Instruction &I) {
2631       auto &RI = cast<ReturnInst>(I);
2632       // Either we stopped and the appropriate action was taken,
2633       // or we got back a simplified return value to continue.
2634       Optional<Value *> SimplifiedRetValue =
2635           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2636       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2637         return true;
2638 
2639       // Check if a return instruction always cause UB or not
2640       // Note: It is guaranteed that the returned position of the anchor
2641       //       scope has noundef attribute when this is called.
2642       //       We also ensure the return position is not "assumed dead"
2643       //       because the returned value was then potentially simplified to
2644       //       `undef` in AAReturnedValues without removing the `noundef`
2645       //       attribute yet.
2646 
2647       // When the returned position has noundef attriubte, UB occurs in the
2648       // following cases.
2649       //   (1) Returned value is known to be undef.
2650       //   (2) The value is known to be a null pointer and the returned
2651       //       position has nonnull attribute (because the returned value is
2652       //       poison).
2653       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2654         auto &NonNullAA = A.getAAFor<AANonNull>(
2655             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2656         if (NonNullAA.isKnownNonNull())
2657           KnownUBInsts.insert(&I);
2658       }
2659 
2660       return true;
2661     };
2662 
2663     bool UsedAssumedInformation = false;
2664     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2665                               {Instruction::Load, Instruction::Store,
2666                                Instruction::AtomicCmpXchg,
2667                                Instruction::AtomicRMW},
2668                               UsedAssumedInformation,
2669                               /* CheckBBLivenessOnly */ true);
2670     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2671                               UsedAssumedInformation,
2672                               /* CheckBBLivenessOnly */ true);
2673     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2674                                       UsedAssumedInformation);
2675 
2676     // If the returned position of the anchor scope has noundef attriubte, check
2677     // all returned instructions.
2678     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2679       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2680       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2681         auto &RetPosNoUndefAA =
2682             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2683         if (RetPosNoUndefAA.isKnownNoUndef())
2684           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2685                                     {Instruction::Ret}, UsedAssumedInformation,
2686                                     /* CheckBBLivenessOnly */ true);
2687       }
2688     }
2689 
2690     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2691         UBPrevSize != KnownUBInsts.size())
2692       return ChangeStatus::CHANGED;
2693     return ChangeStatus::UNCHANGED;
2694   }
2695 
2696   bool isKnownToCauseUB(Instruction *I) const override {
2697     return KnownUBInsts.count(I);
2698   }
2699 
2700   bool isAssumedToCauseUB(Instruction *I) const override {
2701     // In simple words, if an instruction is not in the assumed to _not_
2702     // cause UB, then it is assumed UB (that includes those
2703     // in the KnownUBInsts set). The rest is boilerplate
2704     // is to ensure that it is one of the instructions we test
2705     // for UB.
2706 
2707     switch (I->getOpcode()) {
2708     case Instruction::Load:
2709     case Instruction::Store:
2710     case Instruction::AtomicCmpXchg:
2711     case Instruction::AtomicRMW:
2712       return !AssumedNoUBInsts.count(I);
2713     case Instruction::Br: {
2714       auto *BrInst = cast<BranchInst>(I);
2715       if (BrInst->isUnconditional())
2716         return false;
2717       return !AssumedNoUBInsts.count(I);
2718     } break;
2719     default:
2720       return false;
2721     }
2722     return false;
2723   }
2724 
2725   ChangeStatus manifest(Attributor &A) override {
2726     if (KnownUBInsts.empty())
2727       return ChangeStatus::UNCHANGED;
2728     for (Instruction *I : KnownUBInsts)
2729       A.changeToUnreachableAfterManifest(I);
2730     return ChangeStatus::CHANGED;
2731   }
2732 
2733   /// See AbstractAttribute::getAsStr()
2734   const std::string getAsStr() const override {
2735     return getAssumed() ? "undefined-behavior" : "no-ub";
2736   }
2737 
2738   /// Note: The correctness of this analysis depends on the fact that the
2739   /// following 2 sets will stop changing after some point.
2740   /// "Change" here means that their size changes.
2741   /// The size of each set is monotonically increasing
2742   /// (we only add items to them) and it is upper bounded by the number of
2743   /// instructions in the processed function (we can never save more
2744   /// elements in either set than this number). Hence, at some point,
2745   /// they will stop increasing.
2746   /// Consequently, at some point, both sets will have stopped
2747   /// changing, effectively making the analysis reach a fixpoint.
2748 
2749   /// Note: These 2 sets are disjoint and an instruction can be considered
2750   /// one of 3 things:
2751   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2752   ///    the KnownUBInsts set.
2753   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2754   ///    has a reason to assume it).
2755   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2756   ///    could not find a reason to assume or prove that it can cause UB,
2757   ///    hence it assumes it doesn't. We have a set for these instructions
2758   ///    so that we don't reprocess them in every update.
2759   ///    Note however that instructions in this set may cause UB.
2760 
2761 protected:
2762   /// A set of all live instructions _known_ to cause UB.
2763   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2764 
2765 private:
2766   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2767   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2768 
2769   // Should be called on updates in which if we're processing an instruction
2770   // \p I that depends on a value \p V, one of the following has to happen:
2771   // - If the value is assumed, then stop.
2772   // - If the value is known but undef, then consider it UB.
2773   // - Otherwise, do specific processing with the simplified value.
2774   // We return None in the first 2 cases to signify that an appropriate
2775   // action was taken and the caller should stop.
2776   // Otherwise, we return the simplified value that the caller should
2777   // use for specific processing.
2778   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2779                                          Instruction *I) {
2780     bool UsedAssumedInformation = false;
2781     Optional<Value *> SimplifiedV =
2782         A.getAssumedSimplified(IRPosition::value(*V), *this,
2783                                UsedAssumedInformation, AA::Interprocedural);
2784     if (!UsedAssumedInformation) {
2785       // Don't depend on assumed values.
2786       if (!SimplifiedV) {
2787         // If it is known (which we tested above) but it doesn't have a value,
2788         // then we can assume `undef` and hence the instruction is UB.
2789         KnownUBInsts.insert(I);
2790         return llvm::None;
2791       }
2792       if (!*SimplifiedV)
2793         return nullptr;
2794       V = *SimplifiedV;
2795     }
2796     if (isa<UndefValue>(V)) {
2797       KnownUBInsts.insert(I);
2798       return llvm::None;
2799     }
2800     return V;
2801   }
2802 };
2803 
2804 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2805   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2806       : AAUndefinedBehaviorImpl(IRP, A) {}
2807 
2808   /// See AbstractAttribute::trackStatistics()
2809   void trackStatistics() const override {
2810     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2811                "Number of instructions known to have UB");
2812     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2813         KnownUBInsts.size();
2814   }
2815 };
2816 } // namespace
2817 
2818 /// ------------------------ Will-Return Attributes ----------------------------
2819 
2820 namespace {
2821 // Helper function that checks whether a function has any cycle which we don't
2822 // know if it is bounded or not.
2823 // Loops with maximum trip count are considered bounded, any other cycle not.
2824 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2825   ScalarEvolution *SE =
2826       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2827   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2828   // If either SCEV or LoopInfo is not available for the function then we assume
2829   // any cycle to be unbounded cycle.
2830   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2831   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2832   if (!SE || !LI) {
2833     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2834       if (SCCI.hasCycle())
2835         return true;
2836     return false;
2837   }
2838 
2839   // If there's irreducible control, the function may contain non-loop cycles.
2840   if (mayContainIrreducibleControl(F, LI))
2841     return true;
2842 
2843   // Any loop that does not have a max trip count is considered unbounded cycle.
2844   for (auto *L : LI->getLoopsInPreorder()) {
2845     if (!SE->getSmallConstantMaxTripCount(L))
2846       return true;
2847   }
2848   return false;
2849 }
2850 
2851 struct AAWillReturnImpl : public AAWillReturn {
2852   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2853       : AAWillReturn(IRP, A) {}
2854 
2855   /// See AbstractAttribute::initialize(...).
2856   void initialize(Attributor &A) override {
2857     AAWillReturn::initialize(A);
2858 
2859     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2860       indicateOptimisticFixpoint();
2861       return;
2862     }
2863   }
2864 
2865   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2866   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2867     // Check for `mustprogress` in the scope and the associated function which
2868     // might be different if this is a call site.
2869     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2870         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2871       return false;
2872 
2873     bool IsKnown;
2874     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2875       return IsKnown || !KnownOnly;
2876     return false;
2877   }
2878 
2879   /// See AbstractAttribute::updateImpl(...).
2880   ChangeStatus updateImpl(Attributor &A) override {
2881     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2882       return ChangeStatus::UNCHANGED;
2883 
2884     auto CheckForWillReturn = [&](Instruction &I) {
2885       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2886       const auto &WillReturnAA =
2887           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2888       if (WillReturnAA.isKnownWillReturn())
2889         return true;
2890       if (!WillReturnAA.isAssumedWillReturn())
2891         return false;
2892       const auto &NoRecurseAA =
2893           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2894       return NoRecurseAA.isAssumedNoRecurse();
2895     };
2896 
2897     bool UsedAssumedInformation = false;
2898     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2899                                            UsedAssumedInformation))
2900       return indicatePessimisticFixpoint();
2901 
2902     return ChangeStatus::UNCHANGED;
2903   }
2904 
2905   /// See AbstractAttribute::getAsStr()
2906   const std::string getAsStr() const override {
2907     return getAssumed() ? "willreturn" : "may-noreturn";
2908   }
2909 };
2910 
2911 struct AAWillReturnFunction final : AAWillReturnImpl {
2912   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2913       : AAWillReturnImpl(IRP, A) {}
2914 
2915   /// See AbstractAttribute::initialize(...).
2916   void initialize(Attributor &A) override {
2917     AAWillReturnImpl::initialize(A);
2918 
2919     Function *F = getAnchorScope();
2920     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2921       indicatePessimisticFixpoint();
2922   }
2923 
2924   /// See AbstractAttribute::trackStatistics()
2925   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2926 };
2927 
2928 /// WillReturn attribute deduction for a call sites.
2929 struct AAWillReturnCallSite final : AAWillReturnImpl {
2930   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2931       : AAWillReturnImpl(IRP, A) {}
2932 
2933   /// See AbstractAttribute::initialize(...).
2934   void initialize(Attributor &A) override {
2935     AAWillReturnImpl::initialize(A);
2936     Function *F = getAssociatedFunction();
2937     if (!F || !A.isFunctionIPOAmendable(*F))
2938       indicatePessimisticFixpoint();
2939   }
2940 
2941   /// See AbstractAttribute::updateImpl(...).
2942   ChangeStatus updateImpl(Attributor &A) override {
2943     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2944       return ChangeStatus::UNCHANGED;
2945 
2946     // TODO: Once we have call site specific value information we can provide
2947     //       call site specific liveness information and then it makes
2948     //       sense to specialize attributes for call sites arguments instead of
2949     //       redirecting requests to the callee argument.
2950     Function *F = getAssociatedFunction();
2951     const IRPosition &FnPos = IRPosition::function(*F);
2952     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2953     return clampStateAndIndicateChange(getState(), FnAA.getState());
2954   }
2955 
2956   /// See AbstractAttribute::trackStatistics()
2957   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2958 };
2959 } // namespace
2960 
2961 /// -------------------AAReachability Attribute--------------------------
2962 
2963 namespace {
2964 struct AAReachabilityImpl : AAReachability {
2965   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2966       : AAReachability(IRP, A) {}
2967 
2968   const std::string getAsStr() const override {
2969     // TODO: Return the number of reachable queries.
2970     return "reachable";
2971   }
2972 
2973   /// See AbstractAttribute::updateImpl(...).
2974   ChangeStatus updateImpl(Attributor &A) override {
2975     return ChangeStatus::UNCHANGED;
2976   }
2977 };
2978 
2979 struct AAReachabilityFunction final : public AAReachabilityImpl {
2980   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2981       : AAReachabilityImpl(IRP, A) {}
2982 
2983   /// See AbstractAttribute::trackStatistics()
2984   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2985 };
2986 } // namespace
2987 
2988 /// ------------------------ NoAlias Argument Attribute ------------------------
2989 
2990 namespace {
2991 struct AANoAliasImpl : AANoAlias {
2992   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2993     assert(getAssociatedType()->isPointerTy() &&
2994            "Noalias is a pointer attribute");
2995   }
2996 
2997   const std::string getAsStr() const override {
2998     return getAssumed() ? "noalias" : "may-alias";
2999   }
3000 };
3001 
3002 /// NoAlias attribute for a floating value.
3003 struct AANoAliasFloating final : AANoAliasImpl {
3004   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
3005       : AANoAliasImpl(IRP, A) {}
3006 
3007   /// See AbstractAttribute::initialize(...).
3008   void initialize(Attributor &A) override {
3009     AANoAliasImpl::initialize(A);
3010     Value *Val = &getAssociatedValue();
3011     do {
3012       CastInst *CI = dyn_cast<CastInst>(Val);
3013       if (!CI)
3014         break;
3015       Value *Base = CI->getOperand(0);
3016       if (!Base->hasOneUse())
3017         break;
3018       Val = Base;
3019     } while (true);
3020 
3021     if (!Val->getType()->isPointerTy()) {
3022       indicatePessimisticFixpoint();
3023       return;
3024     }
3025 
3026     if (isa<AllocaInst>(Val))
3027       indicateOptimisticFixpoint();
3028     else if (isa<ConstantPointerNull>(Val) &&
3029              !NullPointerIsDefined(getAnchorScope(),
3030                                    Val->getType()->getPointerAddressSpace()))
3031       indicateOptimisticFixpoint();
3032     else if (Val != &getAssociatedValue()) {
3033       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3034           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3035       if (ValNoAliasAA.isKnownNoAlias())
3036         indicateOptimisticFixpoint();
3037     }
3038   }
3039 
3040   /// See AbstractAttribute::updateImpl(...).
3041   ChangeStatus updateImpl(Attributor &A) override {
3042     // TODO: Implement this.
3043     return indicatePessimisticFixpoint();
3044   }
3045 
3046   /// See AbstractAttribute::trackStatistics()
3047   void trackStatistics() const override {
3048     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3049   }
3050 };
3051 
3052 /// NoAlias attribute for an argument.
3053 struct AANoAliasArgument final
3054     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3055   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3056   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3057 
3058   /// See AbstractAttribute::initialize(...).
3059   void initialize(Attributor &A) override {
3060     Base::initialize(A);
3061     // See callsite argument attribute and callee argument attribute.
3062     if (hasAttr({Attribute::ByVal}))
3063       indicateOptimisticFixpoint();
3064   }
3065 
3066   /// See AbstractAttribute::update(...).
3067   ChangeStatus updateImpl(Attributor &A) override {
3068     // We have to make sure no-alias on the argument does not break
3069     // synchronization when this is a callback argument, see also [1] below.
3070     // If synchronization cannot be affected, we delegate to the base updateImpl
3071     // function, otherwise we give up for now.
3072 
3073     // If the function is no-sync, no-alias cannot break synchronization.
3074     const auto &NoSyncAA =
3075         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3076                              DepClassTy::OPTIONAL);
3077     if (NoSyncAA.isAssumedNoSync())
3078       return Base::updateImpl(A);
3079 
3080     // If the argument is read-only, no-alias cannot break synchronization.
3081     bool IsKnown;
3082     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3083       return Base::updateImpl(A);
3084 
3085     // If the argument is never passed through callbacks, no-alias cannot break
3086     // synchronization.
3087     bool UsedAssumedInformation = false;
3088     if (A.checkForAllCallSites(
3089             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3090             true, UsedAssumedInformation))
3091       return Base::updateImpl(A);
3092 
3093     // TODO: add no-alias but make sure it doesn't break synchronization by
3094     // introducing fake uses. See:
3095     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3096     //     International Workshop on OpenMP 2018,
3097     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3098 
3099     return indicatePessimisticFixpoint();
3100   }
3101 
3102   /// See AbstractAttribute::trackStatistics()
3103   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3104 };
3105 
3106 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3107   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3108       : AANoAliasImpl(IRP, A) {}
3109 
3110   /// See AbstractAttribute::initialize(...).
3111   void initialize(Attributor &A) override {
3112     // See callsite argument attribute and callee argument attribute.
3113     const auto &CB = cast<CallBase>(getAnchorValue());
3114     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3115       indicateOptimisticFixpoint();
3116     Value &Val = getAssociatedValue();
3117     if (isa<ConstantPointerNull>(Val) &&
3118         !NullPointerIsDefined(getAnchorScope(),
3119                               Val.getType()->getPointerAddressSpace()))
3120       indicateOptimisticFixpoint();
3121   }
3122 
3123   /// Determine if the underlying value may alias with the call site argument
3124   /// \p OtherArgNo of \p ICS (= the underlying call site).
3125   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3126                             const AAMemoryBehavior &MemBehaviorAA,
3127                             const CallBase &CB, unsigned OtherArgNo) {
3128     // We do not need to worry about aliasing with the underlying IRP.
3129     if (this->getCalleeArgNo() == (int)OtherArgNo)
3130       return false;
3131 
3132     // If it is not a pointer or pointer vector we do not alias.
3133     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3134     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3135       return false;
3136 
3137     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3138         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3139 
3140     // If the argument is readnone, there is no read-write aliasing.
3141     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3142       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3143       return false;
3144     }
3145 
3146     // If the argument is readonly and the underlying value is readonly, there
3147     // is no read-write aliasing.
3148     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3149     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3150       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3151       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3152       return false;
3153     }
3154 
3155     // We have to utilize actual alias analysis queries so we need the object.
3156     if (!AAR)
3157       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3158 
3159     // Try to rule it out at the call site.
3160     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3161     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3162                          "callsite arguments: "
3163                       << getAssociatedValue() << " " << *ArgOp << " => "
3164                       << (IsAliasing ? "" : "no-") << "alias \n");
3165 
3166     return IsAliasing;
3167   }
3168 
3169   bool
3170   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3171                                          const AAMemoryBehavior &MemBehaviorAA,
3172                                          const AANoAlias &NoAliasAA) {
3173     // We can deduce "noalias" if the following conditions hold.
3174     // (i)   Associated value is assumed to be noalias in the definition.
3175     // (ii)  Associated value is assumed to be no-capture in all the uses
3176     //       possibly executed before this callsite.
3177     // (iii) There is no other pointer argument which could alias with the
3178     //       value.
3179 
3180     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3181     if (!AssociatedValueIsNoAliasAtDef) {
3182       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3183                         << " is not no-alias at the definition\n");
3184       return false;
3185     }
3186 
3187     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3188       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3189           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3190       return DerefAA.getAssumedDereferenceableBytes();
3191     };
3192 
3193     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3194 
3195     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3196     const Function *ScopeFn = VIRP.getAnchorScope();
3197     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3198     // Check whether the value is captured in the scope using AANoCapture.
3199     // Look at CFG and check only uses possibly executed before this
3200     // callsite.
3201     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3202       Instruction *UserI = cast<Instruction>(U.getUser());
3203 
3204       // If UserI is the curr instruction and there is a single potential use of
3205       // the value in UserI we allow the use.
3206       // TODO: We should inspect the operands and allow those that cannot alias
3207       //       with the value.
3208       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3209         return true;
3210 
3211       if (ScopeFn) {
3212         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3213           if (CB->isArgOperand(&U)) {
3214 
3215             unsigned ArgNo = CB->getArgOperandNo(&U);
3216 
3217             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3218                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3219                 DepClassTy::OPTIONAL);
3220 
3221             if (NoCaptureAA.isAssumedNoCapture())
3222               return true;
3223           }
3224         }
3225 
3226         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3227           return true;
3228       }
3229 
3230       // TODO: We should track the capturing uses in AANoCapture but the problem
3231       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3232       //       a value in the module slice.
3233       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3234       case UseCaptureKind::NO_CAPTURE:
3235         return true;
3236       case UseCaptureKind::MAY_CAPTURE:
3237         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3238                           << "\n");
3239         return false;
3240       case UseCaptureKind::PASSTHROUGH:
3241         Follow = true;
3242         return true;
3243       }
3244       llvm_unreachable("unknown UseCaptureKind");
3245     };
3246 
3247     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3248       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3249         LLVM_DEBUG(
3250             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3251                    << " cannot be noalias as it is potentially captured\n");
3252         return false;
3253       }
3254     }
3255     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3256 
3257     // Check there is no other pointer argument which could alias with the
3258     // value passed at this call site.
3259     // TODO: AbstractCallSite
3260     const auto &CB = cast<CallBase>(getAnchorValue());
3261     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3262       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3263         return false;
3264 
3265     return true;
3266   }
3267 
3268   /// See AbstractAttribute::updateImpl(...).
3269   ChangeStatus updateImpl(Attributor &A) override {
3270     // If the argument is readnone we are done as there are no accesses via the
3271     // argument.
3272     auto &MemBehaviorAA =
3273         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3274     if (MemBehaviorAA.isAssumedReadNone()) {
3275       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3276       return ChangeStatus::UNCHANGED;
3277     }
3278 
3279     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3280     const auto &NoAliasAA =
3281         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3282 
3283     AAResults *AAR = nullptr;
3284     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3285                                                NoAliasAA)) {
3286       LLVM_DEBUG(
3287           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3288       return ChangeStatus::UNCHANGED;
3289     }
3290 
3291     return indicatePessimisticFixpoint();
3292   }
3293 
3294   /// See AbstractAttribute::trackStatistics()
3295   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3296 };
3297 
3298 /// NoAlias attribute for function return value.
3299 struct AANoAliasReturned final : AANoAliasImpl {
3300   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3301       : AANoAliasImpl(IRP, A) {}
3302 
3303   /// See AbstractAttribute::initialize(...).
3304   void initialize(Attributor &A) override {
3305     AANoAliasImpl::initialize(A);
3306     Function *F = getAssociatedFunction();
3307     if (!F || F->isDeclaration())
3308       indicatePessimisticFixpoint();
3309   }
3310 
3311   /// See AbstractAttribute::updateImpl(...).
3312   virtual ChangeStatus updateImpl(Attributor &A) override {
3313 
3314     auto CheckReturnValue = [&](Value &RV) -> bool {
3315       if (Constant *C = dyn_cast<Constant>(&RV))
3316         if (C->isNullValue() || isa<UndefValue>(C))
3317           return true;
3318 
3319       /// For now, we can only deduce noalias if we have call sites.
3320       /// FIXME: add more support.
3321       if (!isa<CallBase>(&RV))
3322         return false;
3323 
3324       const IRPosition &RVPos = IRPosition::value(RV);
3325       const auto &NoAliasAA =
3326           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3327       if (!NoAliasAA.isAssumedNoAlias())
3328         return false;
3329 
3330       const auto &NoCaptureAA =
3331           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3332       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3333     };
3334 
3335     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3336       return indicatePessimisticFixpoint();
3337 
3338     return ChangeStatus::UNCHANGED;
3339   }
3340 
3341   /// See AbstractAttribute::trackStatistics()
3342   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3343 };
3344 
3345 /// NoAlias attribute deduction for a call site return value.
3346 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3347   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3348       : AANoAliasImpl(IRP, A) {}
3349 
3350   /// See AbstractAttribute::initialize(...).
3351   void initialize(Attributor &A) override {
3352     AANoAliasImpl::initialize(A);
3353     Function *F = getAssociatedFunction();
3354     if (!F || F->isDeclaration())
3355       indicatePessimisticFixpoint();
3356   }
3357 
3358   /// See AbstractAttribute::updateImpl(...).
3359   ChangeStatus updateImpl(Attributor &A) override {
3360     // TODO: Once we have call site specific value information we can provide
3361     //       call site specific liveness information and then it makes
3362     //       sense to specialize attributes for call sites arguments instead of
3363     //       redirecting requests to the callee argument.
3364     Function *F = getAssociatedFunction();
3365     const IRPosition &FnPos = IRPosition::returned(*F);
3366     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3367     return clampStateAndIndicateChange(getState(), FnAA.getState());
3368   }
3369 
3370   /// See AbstractAttribute::trackStatistics()
3371   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3372 };
3373 } // namespace
3374 
3375 /// -------------------AAIsDead Function Attribute-----------------------
3376 
3377 namespace {
3378 struct AAIsDeadValueImpl : public AAIsDead {
3379   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3380 
3381   /// See AbstractAttribute::initialize(...).
3382   void initialize(Attributor &A) override {
3383     if (auto *Scope = getAnchorScope())
3384       if (!A.isRunOn(*Scope))
3385         indicatePessimisticFixpoint();
3386   }
3387 
3388   /// See AAIsDead::isAssumedDead().
3389   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3390 
3391   /// See AAIsDead::isKnownDead().
3392   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3393 
3394   /// See AAIsDead::isAssumedDead(BasicBlock *).
3395   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3396 
3397   /// See AAIsDead::isKnownDead(BasicBlock *).
3398   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3399 
3400   /// See AAIsDead::isAssumedDead(Instruction *I).
3401   bool isAssumedDead(const Instruction *I) const override {
3402     return I == getCtxI() && isAssumedDead();
3403   }
3404 
3405   /// See AAIsDead::isKnownDead(Instruction *I).
3406   bool isKnownDead(const Instruction *I) const override {
3407     return isAssumedDead(I) && isKnownDead();
3408   }
3409 
3410   /// See AbstractAttribute::getAsStr().
3411   virtual const std::string getAsStr() const override {
3412     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3413   }
3414 
3415   /// Check if all uses are assumed dead.
3416   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3417     // Callers might not check the type, void has no uses.
3418     if (V.getType()->isVoidTy() || V.use_empty())
3419       return true;
3420 
3421     // If we replace a value with a constant there are no uses left afterwards.
3422     if (!isa<Constant>(V)) {
3423       if (auto *I = dyn_cast<Instruction>(&V))
3424         if (!A.isRunOn(*I->getFunction()))
3425           return false;
3426       bool UsedAssumedInformation = false;
3427       Optional<Constant *> C =
3428           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3429       if (!C || *C)
3430         return true;
3431     }
3432 
3433     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3434     // Explicitly set the dependence class to required because we want a long
3435     // chain of N dependent instructions to be considered live as soon as one is
3436     // without going through N update cycles. This is not required for
3437     // correctness.
3438     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3439                              DepClassTy::REQUIRED,
3440                              /* IgnoreDroppableUses */ false);
3441   }
3442 
3443   /// Determine if \p I is assumed to be side-effect free.
3444   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3445     if (!I || wouldInstructionBeTriviallyDead(I))
3446       return true;
3447 
3448     auto *CB = dyn_cast<CallBase>(I);
3449     if (!CB || isa<IntrinsicInst>(CB))
3450       return false;
3451 
3452     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3453     const auto &NoUnwindAA =
3454         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3455     if (!NoUnwindAA.isAssumedNoUnwind())
3456       return false;
3457     if (!NoUnwindAA.isKnownNoUnwind())
3458       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3459 
3460     bool IsKnown;
3461     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3462   }
3463 };
3464 
3465 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3466   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3467       : AAIsDeadValueImpl(IRP, A) {}
3468 
3469   /// See AbstractAttribute::initialize(...).
3470   void initialize(Attributor &A) override {
3471     AAIsDeadValueImpl::initialize(A);
3472 
3473     if (isa<UndefValue>(getAssociatedValue())) {
3474       indicatePessimisticFixpoint();
3475       return;
3476     }
3477 
3478     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3479     if (!isAssumedSideEffectFree(A, I)) {
3480       if (!isa_and_nonnull<StoreInst>(I))
3481         indicatePessimisticFixpoint();
3482       else
3483         removeAssumedBits(HAS_NO_EFFECT);
3484     }
3485   }
3486 
3487   bool isDeadStore(Attributor &A, StoreInst &SI) {
3488     // Lang ref now states volatile store is not UB/dead, let's skip them.
3489     if (SI.isVolatile())
3490       return false;
3491 
3492     bool UsedAssumedInformation = false;
3493     SmallSetVector<Value *, 4> PotentialCopies;
3494     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3495                                              UsedAssumedInformation))
3496       return false;
3497     return llvm::all_of(PotentialCopies, [&](Value *V) {
3498       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3499                              UsedAssumedInformation);
3500     });
3501   }
3502 
3503   /// See AbstractAttribute::getAsStr().
3504   const std::string getAsStr() const override {
3505     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3506     if (isa_and_nonnull<StoreInst>(I))
3507       if (isValidState())
3508         return "assumed-dead-store";
3509     return AAIsDeadValueImpl::getAsStr();
3510   }
3511 
3512   /// See AbstractAttribute::updateImpl(...).
3513   ChangeStatus updateImpl(Attributor &A) override {
3514     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3515     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3516       if (!isDeadStore(A, *SI))
3517         return indicatePessimisticFixpoint();
3518     } else {
3519       if (!isAssumedSideEffectFree(A, I))
3520         return indicatePessimisticFixpoint();
3521       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3522         return indicatePessimisticFixpoint();
3523     }
3524     return ChangeStatus::UNCHANGED;
3525   }
3526 
3527   bool isRemovableStore() const override {
3528     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3529   }
3530 
3531   /// See AbstractAttribute::manifest(...).
3532   ChangeStatus manifest(Attributor &A) override {
3533     Value &V = getAssociatedValue();
3534     if (auto *I = dyn_cast<Instruction>(&V)) {
3535       // If we get here we basically know the users are all dead. We check if
3536       // isAssumedSideEffectFree returns true here again because it might not be
3537       // the case and only the users are dead but the instruction (=call) is
3538       // still needed.
3539       if (isa<StoreInst>(I) ||
3540           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3541         A.deleteAfterManifest(*I);
3542         return ChangeStatus::CHANGED;
3543       }
3544     }
3545     return ChangeStatus::UNCHANGED;
3546   }
3547 
3548   /// See AbstractAttribute::trackStatistics()
3549   void trackStatistics() const override {
3550     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3551   }
3552 };
3553 
3554 struct AAIsDeadArgument : public AAIsDeadFloating {
3555   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3556       : AAIsDeadFloating(IRP, A) {}
3557 
3558   /// See AbstractAttribute::initialize(...).
3559   void initialize(Attributor &A) override {
3560     AAIsDeadFloating::initialize(A);
3561     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3562       indicatePessimisticFixpoint();
3563   }
3564 
3565   /// See AbstractAttribute::manifest(...).
3566   ChangeStatus manifest(Attributor &A) override {
3567     Argument &Arg = *getAssociatedArgument();
3568     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3569       if (A.registerFunctionSignatureRewrite(
3570               Arg, /* ReplacementTypes */ {},
3571               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3572               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3573         return ChangeStatus::CHANGED;
3574       }
3575     return ChangeStatus::UNCHANGED;
3576   }
3577 
3578   /// See AbstractAttribute::trackStatistics()
3579   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3580 };
3581 
3582 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3583   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3584       : AAIsDeadValueImpl(IRP, A) {}
3585 
3586   /// See AbstractAttribute::initialize(...).
3587   void initialize(Attributor &A) override {
3588     AAIsDeadValueImpl::initialize(A);
3589     if (isa<UndefValue>(getAssociatedValue()))
3590       indicatePessimisticFixpoint();
3591   }
3592 
3593   /// See AbstractAttribute::updateImpl(...).
3594   ChangeStatus updateImpl(Attributor &A) override {
3595     // TODO: Once we have call site specific value information we can provide
3596     //       call site specific liveness information and then it makes
3597     //       sense to specialize attributes for call sites arguments instead of
3598     //       redirecting requests to the callee argument.
3599     Argument *Arg = getAssociatedArgument();
3600     if (!Arg)
3601       return indicatePessimisticFixpoint();
3602     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3603     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3604     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3605   }
3606 
3607   /// See AbstractAttribute::manifest(...).
3608   ChangeStatus manifest(Attributor &A) override {
3609     CallBase &CB = cast<CallBase>(getAnchorValue());
3610     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3611     assert(!isa<UndefValue>(U.get()) &&
3612            "Expected undef values to be filtered out!");
3613     UndefValue &UV = *UndefValue::get(U->getType());
3614     if (A.changeUseAfterManifest(U, UV))
3615       return ChangeStatus::CHANGED;
3616     return ChangeStatus::UNCHANGED;
3617   }
3618 
3619   /// See AbstractAttribute::trackStatistics()
3620   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3621 };
3622 
3623 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3624   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3625       : AAIsDeadFloating(IRP, A) {}
3626 
3627   /// See AAIsDead::isAssumedDead().
3628   bool isAssumedDead() const override {
3629     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3630   }
3631 
3632   /// See AbstractAttribute::initialize(...).
3633   void initialize(Attributor &A) override {
3634     AAIsDeadFloating::initialize(A);
3635     if (isa<UndefValue>(getAssociatedValue())) {
3636       indicatePessimisticFixpoint();
3637       return;
3638     }
3639 
3640     // We track this separately as a secondary state.
3641     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3642   }
3643 
3644   /// See AbstractAttribute::updateImpl(...).
3645   ChangeStatus updateImpl(Attributor &A) override {
3646     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3647     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3648       IsAssumedSideEffectFree = false;
3649       Changed = ChangeStatus::CHANGED;
3650     }
3651     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3652       return indicatePessimisticFixpoint();
3653     return Changed;
3654   }
3655 
3656   /// See AbstractAttribute::trackStatistics()
3657   void trackStatistics() const override {
3658     if (IsAssumedSideEffectFree)
3659       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3660     else
3661       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3662   }
3663 
3664   /// See AbstractAttribute::getAsStr().
3665   const std::string getAsStr() const override {
3666     return isAssumedDead()
3667                ? "assumed-dead"
3668                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3669   }
3670 
3671 private:
3672   bool IsAssumedSideEffectFree = true;
3673 };
3674 
3675 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3676   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3677       : AAIsDeadValueImpl(IRP, A) {}
3678 
3679   /// See AbstractAttribute::updateImpl(...).
3680   ChangeStatus updateImpl(Attributor &A) override {
3681 
3682     bool UsedAssumedInformation = false;
3683     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3684                               {Instruction::Ret}, UsedAssumedInformation);
3685 
3686     auto PredForCallSite = [&](AbstractCallSite ACS) {
3687       if (ACS.isCallbackCall() || !ACS.getInstruction())
3688         return false;
3689       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3690     };
3691 
3692     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3693                                 UsedAssumedInformation))
3694       return indicatePessimisticFixpoint();
3695 
3696     return ChangeStatus::UNCHANGED;
3697   }
3698 
3699   /// See AbstractAttribute::manifest(...).
3700   ChangeStatus manifest(Attributor &A) override {
3701     // TODO: Rewrite the signature to return void?
3702     bool AnyChange = false;
3703     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3704     auto RetInstPred = [&](Instruction &I) {
3705       ReturnInst &RI = cast<ReturnInst>(I);
3706       if (!isa<UndefValue>(RI.getReturnValue()))
3707         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3708       return true;
3709     };
3710     bool UsedAssumedInformation = false;
3711     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3712                               UsedAssumedInformation);
3713     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3714   }
3715 
3716   /// See AbstractAttribute::trackStatistics()
3717   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3718 };
3719 
3720 struct AAIsDeadFunction : public AAIsDead {
3721   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3722 
3723   /// See AbstractAttribute::initialize(...).
3724   void initialize(Attributor &A) override {
3725     Function *F = getAnchorScope();
3726     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3727       indicatePessimisticFixpoint();
3728       return;
3729     }
3730     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3731     assumeLive(A, F->getEntryBlock());
3732   }
3733 
3734   /// See AbstractAttribute::getAsStr().
3735   const std::string getAsStr() const override {
3736     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3737            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3738            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3739            std::to_string(KnownDeadEnds.size()) + "]";
3740   }
3741 
3742   /// See AbstractAttribute::manifest(...).
3743   ChangeStatus manifest(Attributor &A) override {
3744     assert(getState().isValidState() &&
3745            "Attempted to manifest an invalid state!");
3746 
3747     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3748     Function &F = *getAnchorScope();
3749 
3750     if (AssumedLiveBlocks.empty()) {
3751       A.deleteAfterManifest(F);
3752       return ChangeStatus::CHANGED;
3753     }
3754 
3755     // Flag to determine if we can change an invoke to a call assuming the
3756     // callee is nounwind. This is not possible if the personality of the
3757     // function allows to catch asynchronous exceptions.
3758     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3759 
3760     KnownDeadEnds.set_union(ToBeExploredFrom);
3761     for (const Instruction *DeadEndI : KnownDeadEnds) {
3762       auto *CB = dyn_cast<CallBase>(DeadEndI);
3763       if (!CB)
3764         continue;
3765       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3766           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3767       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3768       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3769         continue;
3770 
3771       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3772         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3773       else
3774         A.changeToUnreachableAfterManifest(
3775             const_cast<Instruction *>(DeadEndI->getNextNode()));
3776       HasChanged = ChangeStatus::CHANGED;
3777     }
3778 
3779     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3780     for (BasicBlock &BB : F)
3781       if (!AssumedLiveBlocks.count(&BB)) {
3782         A.deleteAfterManifest(BB);
3783         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3784         HasChanged = ChangeStatus::CHANGED;
3785       }
3786 
3787     return HasChanged;
3788   }
3789 
3790   /// See AbstractAttribute::updateImpl(...).
3791   ChangeStatus updateImpl(Attributor &A) override;
3792 
3793   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3794     assert(From->getParent() == getAnchorScope() &&
3795            To->getParent() == getAnchorScope() &&
3796            "Used AAIsDead of the wrong function");
3797     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3798   }
3799 
3800   /// See AbstractAttribute::trackStatistics()
3801   void trackStatistics() const override {}
3802 
3803   /// Returns true if the function is assumed dead.
3804   bool isAssumedDead() const override { return false; }
3805 
3806   /// See AAIsDead::isKnownDead().
3807   bool isKnownDead() const override { return false; }
3808 
3809   /// See AAIsDead::isAssumedDead(BasicBlock *).
3810   bool isAssumedDead(const BasicBlock *BB) const override {
3811     assert(BB->getParent() == getAnchorScope() &&
3812            "BB must be in the same anchor scope function.");
3813 
3814     if (!getAssumed())
3815       return false;
3816     return !AssumedLiveBlocks.count(BB);
3817   }
3818 
3819   /// See AAIsDead::isKnownDead(BasicBlock *).
3820   bool isKnownDead(const BasicBlock *BB) const override {
3821     return getKnown() && isAssumedDead(BB);
3822   }
3823 
3824   /// See AAIsDead::isAssumed(Instruction *I).
3825   bool isAssumedDead(const Instruction *I) const override {
3826     assert(I->getParent()->getParent() == getAnchorScope() &&
3827            "Instruction must be in the same anchor scope function.");
3828 
3829     if (!getAssumed())
3830       return false;
3831 
3832     // If it is not in AssumedLiveBlocks then it for sure dead.
3833     // Otherwise, it can still be after noreturn call in a live block.
3834     if (!AssumedLiveBlocks.count(I->getParent()))
3835       return true;
3836 
3837     // If it is not after a liveness barrier it is live.
3838     const Instruction *PrevI = I->getPrevNode();
3839     while (PrevI) {
3840       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3841         return true;
3842       PrevI = PrevI->getPrevNode();
3843     }
3844     return false;
3845   }
3846 
3847   /// See AAIsDead::isKnownDead(Instruction *I).
3848   bool isKnownDead(const Instruction *I) const override {
3849     return getKnown() && isAssumedDead(I);
3850   }
3851 
3852   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3853   /// that internal function called from \p BB should now be looked at.
3854   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3855     if (!AssumedLiveBlocks.insert(&BB).second)
3856       return false;
3857 
3858     // We assume that all of BB is (probably) live now and if there are calls to
3859     // internal functions we will assume that those are now live as well. This
3860     // is a performance optimization for blocks with calls to a lot of internal
3861     // functions. It can however cause dead functions to be treated as live.
3862     for (const Instruction &I : BB)
3863       if (const auto *CB = dyn_cast<CallBase>(&I))
3864         if (const Function *F = CB->getCalledFunction())
3865           if (F->hasLocalLinkage())
3866             A.markLiveInternalFunction(*F);
3867     return true;
3868   }
3869 
3870   /// Collection of instructions that need to be explored again, e.g., we
3871   /// did assume they do not transfer control to (one of their) successors.
3872   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3873 
3874   /// Collection of instructions that are known to not transfer control.
3875   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3876 
3877   /// Collection of all assumed live edges
3878   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3879 
3880   /// Collection of all assumed live BasicBlocks.
3881   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3882 };
3883 
3884 static bool
3885 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3886                         AbstractAttribute &AA,
3887                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3888   const IRPosition &IPos = IRPosition::callsite_function(CB);
3889 
3890   const auto &NoReturnAA =
3891       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3892   if (NoReturnAA.isAssumedNoReturn())
3893     return !NoReturnAA.isKnownNoReturn();
3894   if (CB.isTerminator())
3895     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3896   else
3897     AliveSuccessors.push_back(CB.getNextNode());
3898   return false;
3899 }
3900 
3901 static bool
3902 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3903                         AbstractAttribute &AA,
3904                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3905   bool UsedAssumedInformation =
3906       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3907 
3908   // First, determine if we can change an invoke to a call assuming the
3909   // callee is nounwind. This is not possible if the personality of the
3910   // function allows to catch asynchronous exceptions.
3911   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3912     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3913   } else {
3914     const IRPosition &IPos = IRPosition::callsite_function(II);
3915     const auto &AANoUnw =
3916         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3917     if (AANoUnw.isAssumedNoUnwind()) {
3918       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3919     } else {
3920       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3921     }
3922   }
3923   return UsedAssumedInformation;
3924 }
3925 
3926 static bool
3927 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3928                         AbstractAttribute &AA,
3929                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3930   bool UsedAssumedInformation = false;
3931   if (BI.getNumSuccessors() == 1) {
3932     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3933   } else {
3934     Optional<Constant *> C =
3935         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3936     if (!C || isa_and_nonnull<UndefValue>(*C)) {
3937       // No value yet, assume both edges are dead.
3938     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3939       const BasicBlock *SuccBB =
3940           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3941       AliveSuccessors.push_back(&SuccBB->front());
3942     } else {
3943       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3944       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3945       UsedAssumedInformation = false;
3946     }
3947   }
3948   return UsedAssumedInformation;
3949 }
3950 
3951 static bool
3952 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3953                         AbstractAttribute &AA,
3954                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3955   bool UsedAssumedInformation = false;
3956   Optional<Constant *> C =
3957       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3958   if (!C || isa_and_nonnull<UndefValue>(C.value())) {
3959     // No value yet, assume all edges are dead.
3960   } else if (isa_and_nonnull<ConstantInt>(C.value())) {
3961     for (auto &CaseIt : SI.cases()) {
3962       if (CaseIt.getCaseValue() == C.value()) {
3963         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3964         return UsedAssumedInformation;
3965       }
3966     }
3967     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3968     return UsedAssumedInformation;
3969   } else {
3970     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3971       AliveSuccessors.push_back(&SuccBB->front());
3972   }
3973   return UsedAssumedInformation;
3974 }
3975 
3976 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3977   ChangeStatus Change = ChangeStatus::UNCHANGED;
3978 
3979   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3980                     << getAnchorScope()->size() << "] BBs and "
3981                     << ToBeExploredFrom.size() << " exploration points and "
3982                     << KnownDeadEnds.size() << " known dead ends\n");
3983 
3984   // Copy and clear the list of instructions we need to explore from. It is
3985   // refilled with instructions the next update has to look at.
3986   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3987                                                ToBeExploredFrom.end());
3988   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3989 
3990   SmallVector<const Instruction *, 8> AliveSuccessors;
3991   while (!Worklist.empty()) {
3992     const Instruction *I = Worklist.pop_back_val();
3993     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3994 
3995     // Fast forward for uninteresting instructions. We could look for UB here
3996     // though.
3997     while (!I->isTerminator() && !isa<CallBase>(I))
3998       I = I->getNextNode();
3999 
4000     AliveSuccessors.clear();
4001 
4002     bool UsedAssumedInformation = false;
4003     switch (I->getOpcode()) {
4004     // TODO: look for (assumed) UB to backwards propagate "deadness".
4005     default:
4006       assert(I->isTerminator() &&
4007              "Expected non-terminators to be handled already!");
4008       for (const BasicBlock *SuccBB : successors(I->getParent()))
4009         AliveSuccessors.push_back(&SuccBB->front());
4010       break;
4011     case Instruction::Call:
4012       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
4013                                                        *this, AliveSuccessors);
4014       break;
4015     case Instruction::Invoke:
4016       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
4017                                                        *this, AliveSuccessors);
4018       break;
4019     case Instruction::Br:
4020       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
4021                                                        *this, AliveSuccessors);
4022       break;
4023     case Instruction::Switch:
4024       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4025                                                        *this, AliveSuccessors);
4026       break;
4027     }
4028 
4029     if (UsedAssumedInformation) {
4030       NewToBeExploredFrom.insert(I);
4031     } else if (AliveSuccessors.empty() ||
4032                (I->isTerminator() &&
4033                 AliveSuccessors.size() < I->getNumSuccessors())) {
4034       if (KnownDeadEnds.insert(I))
4035         Change = ChangeStatus::CHANGED;
4036     }
4037 
4038     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4039                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4040                       << UsedAssumedInformation << "\n");
4041 
4042     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4043       if (!I->isTerminator()) {
4044         assert(AliveSuccessors.size() == 1 &&
4045                "Non-terminator expected to have a single successor!");
4046         Worklist.push_back(AliveSuccessor);
4047       } else {
4048         // record the assumed live edge
4049         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4050         if (AssumedLiveEdges.insert(Edge).second)
4051           Change = ChangeStatus::CHANGED;
4052         if (assumeLive(A, *AliveSuccessor->getParent()))
4053           Worklist.push_back(AliveSuccessor);
4054       }
4055     }
4056   }
4057 
4058   // Check if the content of ToBeExploredFrom changed, ignore the order.
4059   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4060       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4061         return !ToBeExploredFrom.count(I);
4062       })) {
4063     Change = ChangeStatus::CHANGED;
4064     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4065   }
4066 
4067   // If we know everything is live there is no need to query for liveness.
4068   // Instead, indicating a pessimistic fixpoint will cause the state to be
4069   // "invalid" and all queries to be answered conservatively without lookups.
4070   // To be in this state we have to (1) finished the exploration and (3) not
4071   // discovered any non-trivial dead end and (2) not ruled unreachable code
4072   // dead.
4073   if (ToBeExploredFrom.empty() &&
4074       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4075       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4076         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4077       }))
4078     return indicatePessimisticFixpoint();
4079   return Change;
4080 }
4081 
4082 /// Liveness information for a call sites.
4083 struct AAIsDeadCallSite final : AAIsDeadFunction {
4084   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4085       : AAIsDeadFunction(IRP, A) {}
4086 
4087   /// See AbstractAttribute::initialize(...).
4088   void initialize(Attributor &A) override {
4089     // TODO: Once we have call site specific value information we can provide
4090     //       call site specific liveness information and then it makes
4091     //       sense to specialize attributes for call sites instead of
4092     //       redirecting requests to the callee.
4093     llvm_unreachable("Abstract attributes for liveness are not "
4094                      "supported for call sites yet!");
4095   }
4096 
4097   /// See AbstractAttribute::updateImpl(...).
4098   ChangeStatus updateImpl(Attributor &A) override {
4099     return indicatePessimisticFixpoint();
4100   }
4101 
4102   /// See AbstractAttribute::trackStatistics()
4103   void trackStatistics() const override {}
4104 };
4105 } // namespace
4106 
4107 /// -------------------- Dereferenceable Argument Attribute --------------------
4108 
4109 namespace {
4110 struct AADereferenceableImpl : AADereferenceable {
4111   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4112       : AADereferenceable(IRP, A) {}
4113   using StateType = DerefState;
4114 
4115   /// See AbstractAttribute::initialize(...).
4116   void initialize(Attributor &A) override {
4117     Value &V = *getAssociatedValue().stripPointerCasts();
4118     SmallVector<Attribute, 4> Attrs;
4119     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4120              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4121     for (const Attribute &Attr : Attrs)
4122       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4123 
4124     const IRPosition &IRP = this->getIRPosition();
4125     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4126 
4127     bool CanBeNull, CanBeFreed;
4128     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4129         A.getDataLayout(), CanBeNull, CanBeFreed));
4130 
4131     bool IsFnInterface = IRP.isFnInterfaceKind();
4132     Function *FnScope = IRP.getAnchorScope();
4133     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4134       indicatePessimisticFixpoint();
4135       return;
4136     }
4137 
4138     if (Instruction *CtxI = getCtxI())
4139       followUsesInMBEC(*this, A, getState(), *CtxI);
4140   }
4141 
4142   /// See AbstractAttribute::getState()
4143   /// {
4144   StateType &getState() override { return *this; }
4145   const StateType &getState() const override { return *this; }
4146   /// }
4147 
4148   /// Helper function for collecting accessed bytes in must-be-executed-context
4149   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4150                               DerefState &State) {
4151     const Value *UseV = U->get();
4152     if (!UseV->getType()->isPointerTy())
4153       return;
4154 
4155     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4156     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4157       return;
4158 
4159     int64_t Offset;
4160     const Value *Base = GetPointerBaseWithConstantOffset(
4161         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4162     if (Base && Base == &getAssociatedValue())
4163       State.addAccessedBytes(Offset, Loc->Size.getValue());
4164   }
4165 
4166   /// See followUsesInMBEC
4167   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4168                        AADereferenceable::StateType &State) {
4169     bool IsNonNull = false;
4170     bool TrackUse = false;
4171     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4172         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4173     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4174                       << " for instruction " << *I << "\n");
4175 
4176     addAccessedBytesForUse(A, U, I, State);
4177     State.takeKnownDerefBytesMaximum(DerefBytes);
4178     return TrackUse;
4179   }
4180 
4181   /// See AbstractAttribute::manifest(...).
4182   ChangeStatus manifest(Attributor &A) override {
4183     ChangeStatus Change = AADereferenceable::manifest(A);
4184     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4185       removeAttrs({Attribute::DereferenceableOrNull});
4186       return ChangeStatus::CHANGED;
4187     }
4188     return Change;
4189   }
4190 
4191   void getDeducedAttributes(LLVMContext &Ctx,
4192                             SmallVectorImpl<Attribute> &Attrs) const override {
4193     // TODO: Add *_globally support
4194     if (isAssumedNonNull())
4195       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4196           Ctx, getAssumedDereferenceableBytes()));
4197     else
4198       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4199           Ctx, getAssumedDereferenceableBytes()));
4200   }
4201 
4202   /// See AbstractAttribute::getAsStr().
4203   const std::string getAsStr() const override {
4204     if (!getAssumedDereferenceableBytes())
4205       return "unknown-dereferenceable";
4206     return std::string("dereferenceable") +
4207            (isAssumedNonNull() ? "" : "_or_null") +
4208            (isAssumedGlobal() ? "_globally" : "") + "<" +
4209            std::to_string(getKnownDereferenceableBytes()) + "-" +
4210            std::to_string(getAssumedDereferenceableBytes()) + ">";
4211   }
4212 };
4213 
4214 /// Dereferenceable attribute for a floating value.
4215 struct AADereferenceableFloating : AADereferenceableImpl {
4216   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4217       : AADereferenceableImpl(IRP, A) {}
4218 
4219   /// See AbstractAttribute::updateImpl(...).
4220   ChangeStatus updateImpl(Attributor &A) override {
4221 
4222     bool Stripped;
4223     bool UsedAssumedInformation = false;
4224     SmallVector<AA::ValueAndContext> Values;
4225     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4226                                       AA::AnyScope, UsedAssumedInformation)) {
4227       Values.push_back({getAssociatedValue(), getCtxI()});
4228       Stripped = false;
4229     } else {
4230       Stripped = Values.size() != 1 ||
4231                  Values.front().getValue() != &getAssociatedValue();
4232     }
4233 
4234     const DataLayout &DL = A.getDataLayout();
4235     DerefState T;
4236 
4237     auto VisitValueCB = [&](const Value &V) -> bool {
4238       unsigned IdxWidth =
4239           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4240       APInt Offset(IdxWidth, 0);
4241       const Value *Base = stripAndAccumulateOffsets(
4242           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4243           /* AllowNonInbounds */ true);
4244 
4245       const auto &AA = A.getAAFor<AADereferenceable>(
4246           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4247       int64_t DerefBytes = 0;
4248       if (!Stripped && this == &AA) {
4249         // Use IR information if we did not strip anything.
4250         // TODO: track globally.
4251         bool CanBeNull, CanBeFreed;
4252         DerefBytes =
4253             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4254         T.GlobalState.indicatePessimisticFixpoint();
4255       } else {
4256         const DerefState &DS = AA.getState();
4257         DerefBytes = DS.DerefBytesState.getAssumed();
4258         T.GlobalState &= DS.GlobalState;
4259       }
4260 
4261       // For now we do not try to "increase" dereferenceability due to negative
4262       // indices as we first have to come up with code to deal with loops and
4263       // for overflows of the dereferenceable bytes.
4264       int64_t OffsetSExt = Offset.getSExtValue();
4265       if (OffsetSExt < 0)
4266         OffsetSExt = 0;
4267 
4268       T.takeAssumedDerefBytesMinimum(
4269           std::max(int64_t(0), DerefBytes - OffsetSExt));
4270 
4271       if (this == &AA) {
4272         if (!Stripped) {
4273           // If nothing was stripped IR information is all we got.
4274           T.takeKnownDerefBytesMaximum(
4275               std::max(int64_t(0), DerefBytes - OffsetSExt));
4276           T.indicatePessimisticFixpoint();
4277         } else if (OffsetSExt > 0) {
4278           // If something was stripped but there is circular reasoning we look
4279           // for the offset. If it is positive we basically decrease the
4280           // dereferenceable bytes in a circluar loop now, which will simply
4281           // drive them down to the known value in a very slow way which we
4282           // can accelerate.
4283           T.indicatePessimisticFixpoint();
4284         }
4285       }
4286 
4287       return T.isValidState();
4288     };
4289 
4290     for (const auto &VAC : Values)
4291       if (!VisitValueCB(*VAC.getValue()))
4292         return indicatePessimisticFixpoint();
4293 
4294     return clampStateAndIndicateChange(getState(), T);
4295   }
4296 
4297   /// See AbstractAttribute::trackStatistics()
4298   void trackStatistics() const override {
4299     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4300   }
4301 };
4302 
4303 /// Dereferenceable attribute for a return value.
4304 struct AADereferenceableReturned final
4305     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4306   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4307       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4308             IRP, A) {}
4309 
4310   /// See AbstractAttribute::trackStatistics()
4311   void trackStatistics() const override {
4312     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4313   }
4314 };
4315 
4316 /// Dereferenceable attribute for an argument
4317 struct AADereferenceableArgument final
4318     : AAArgumentFromCallSiteArguments<AADereferenceable,
4319                                       AADereferenceableImpl> {
4320   using Base =
4321       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4322   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4323       : Base(IRP, A) {}
4324 
4325   /// See AbstractAttribute::trackStatistics()
4326   void trackStatistics() const override {
4327     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4328   }
4329 };
4330 
4331 /// Dereferenceable attribute for a call site argument.
4332 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4333   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4334       : AADereferenceableFloating(IRP, A) {}
4335 
4336   /// See AbstractAttribute::trackStatistics()
4337   void trackStatistics() const override {
4338     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4339   }
4340 };
4341 
4342 /// Dereferenceable attribute deduction for a call site return value.
4343 struct AADereferenceableCallSiteReturned final
4344     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4345   using Base =
4346       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4347   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4348       : Base(IRP, A) {}
4349 
4350   /// See AbstractAttribute::trackStatistics()
4351   void trackStatistics() const override {
4352     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4353   }
4354 };
4355 } // namespace
4356 
4357 // ------------------------ Align Argument Attribute ------------------------
4358 
4359 namespace {
4360 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4361                                     Value &AssociatedValue, const Use *U,
4362                                     const Instruction *I, bool &TrackUse) {
4363   // We need to follow common pointer manipulation uses to the accesses they
4364   // feed into.
4365   if (isa<CastInst>(I)) {
4366     // Follow all but ptr2int casts.
4367     TrackUse = !isa<PtrToIntInst>(I);
4368     return 0;
4369   }
4370   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4371     if (GEP->hasAllConstantIndices())
4372       TrackUse = true;
4373     return 0;
4374   }
4375 
4376   MaybeAlign MA;
4377   if (const auto *CB = dyn_cast<CallBase>(I)) {
4378     if (CB->isBundleOperand(U) || CB->isCallee(U))
4379       return 0;
4380 
4381     unsigned ArgNo = CB->getArgOperandNo(U);
4382     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4383     // As long as we only use known information there is no need to track
4384     // dependences here.
4385     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4386     MA = MaybeAlign(AlignAA.getKnownAlign());
4387   }
4388 
4389   const DataLayout &DL = A.getDataLayout();
4390   const Value *UseV = U->get();
4391   if (auto *SI = dyn_cast<StoreInst>(I)) {
4392     if (SI->getPointerOperand() == UseV)
4393       MA = SI->getAlign();
4394   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4395     if (LI->getPointerOperand() == UseV)
4396       MA = LI->getAlign();
4397   }
4398 
4399   if (!MA || *MA <= QueryingAA.getKnownAlign())
4400     return 0;
4401 
4402   unsigned Alignment = MA->value();
4403   int64_t Offset;
4404 
4405   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4406     if (Base == &AssociatedValue) {
4407       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4408       // So we can say that the maximum power of two which is a divisor of
4409       // gcd(Offset, Alignment) is an alignment.
4410 
4411       uint32_t gcd =
4412           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4413       Alignment = llvm::PowerOf2Floor(gcd);
4414     }
4415   }
4416 
4417   return Alignment;
4418 }
4419 
4420 struct AAAlignImpl : AAAlign {
4421   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4422 
4423   /// See AbstractAttribute::initialize(...).
4424   void initialize(Attributor &A) override {
4425     SmallVector<Attribute, 4> Attrs;
4426     getAttrs({Attribute::Alignment}, Attrs);
4427     for (const Attribute &Attr : Attrs)
4428       takeKnownMaximum(Attr.getValueAsInt());
4429 
4430     Value &V = *getAssociatedValue().stripPointerCasts();
4431     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4432 
4433     if (getIRPosition().isFnInterfaceKind() &&
4434         (!getAnchorScope() ||
4435          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4436       indicatePessimisticFixpoint();
4437       return;
4438     }
4439 
4440     if (Instruction *CtxI = getCtxI())
4441       followUsesInMBEC(*this, A, getState(), *CtxI);
4442   }
4443 
4444   /// See AbstractAttribute::manifest(...).
4445   ChangeStatus manifest(Attributor &A) override {
4446     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4447 
4448     // Check for users that allow alignment annotations.
4449     Value &AssociatedValue = getAssociatedValue();
4450     for (const Use &U : AssociatedValue.uses()) {
4451       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4452         if (SI->getPointerOperand() == &AssociatedValue)
4453           if (SI->getAlign() < getAssumedAlign()) {
4454             STATS_DECLTRACK(AAAlign, Store,
4455                             "Number of times alignment added to a store");
4456             SI->setAlignment(getAssumedAlign());
4457             LoadStoreChanged = ChangeStatus::CHANGED;
4458           }
4459       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4460         if (LI->getPointerOperand() == &AssociatedValue)
4461           if (LI->getAlign() < getAssumedAlign()) {
4462             LI->setAlignment(getAssumedAlign());
4463             STATS_DECLTRACK(AAAlign, Load,
4464                             "Number of times alignment added to a load");
4465             LoadStoreChanged = ChangeStatus::CHANGED;
4466           }
4467       }
4468     }
4469 
4470     ChangeStatus Changed = AAAlign::manifest(A);
4471 
4472     Align InheritAlign =
4473         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4474     if (InheritAlign >= getAssumedAlign())
4475       return LoadStoreChanged;
4476     return Changed | LoadStoreChanged;
4477   }
4478 
4479   // TODO: Provide a helper to determine the implied ABI alignment and check in
4480   //       the existing manifest method and a new one for AAAlignImpl that value
4481   //       to avoid making the alignment explicit if it did not improve.
4482 
4483   /// See AbstractAttribute::getDeducedAttributes
4484   virtual void
4485   getDeducedAttributes(LLVMContext &Ctx,
4486                        SmallVectorImpl<Attribute> &Attrs) const override {
4487     if (getAssumedAlign() > 1)
4488       Attrs.emplace_back(
4489           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4490   }
4491 
4492   /// See followUsesInMBEC
4493   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4494                        AAAlign::StateType &State) {
4495     bool TrackUse = false;
4496 
4497     unsigned int KnownAlign =
4498         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4499     State.takeKnownMaximum(KnownAlign);
4500 
4501     return TrackUse;
4502   }
4503 
4504   /// See AbstractAttribute::getAsStr().
4505   const std::string getAsStr() const override {
4506     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4507            std::to_string(getAssumedAlign().value()) + ">";
4508   }
4509 };
4510 
4511 /// Align attribute for a floating value.
4512 struct AAAlignFloating : AAAlignImpl {
4513   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4514 
4515   /// See AbstractAttribute::updateImpl(...).
4516   ChangeStatus updateImpl(Attributor &A) override {
4517     const DataLayout &DL = A.getDataLayout();
4518 
4519     bool Stripped;
4520     bool UsedAssumedInformation = false;
4521     SmallVector<AA::ValueAndContext> Values;
4522     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4523                                       AA::AnyScope, UsedAssumedInformation)) {
4524       Values.push_back({getAssociatedValue(), getCtxI()});
4525       Stripped = false;
4526     } else {
4527       Stripped = Values.size() != 1 ||
4528                  Values.front().getValue() != &getAssociatedValue();
4529     }
4530 
4531     StateType T;
4532     auto VisitValueCB = [&](Value &V) -> bool {
4533       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4534         return true;
4535       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4536                                            DepClassTy::REQUIRED);
4537       if (!Stripped && this == &AA) {
4538         int64_t Offset;
4539         unsigned Alignment = 1;
4540         if (const Value *Base =
4541                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4542           // TODO: Use AAAlign for the base too.
4543           Align PA = Base->getPointerAlignment(DL);
4544           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4545           // So we can say that the maximum power of two which is a divisor of
4546           // gcd(Offset, Alignment) is an alignment.
4547 
4548           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4549                                                uint32_t(PA.value()));
4550           Alignment = llvm::PowerOf2Floor(gcd);
4551         } else {
4552           Alignment = V.getPointerAlignment(DL).value();
4553         }
4554         // Use only IR information if we did not strip anything.
4555         T.takeKnownMaximum(Alignment);
4556         T.indicatePessimisticFixpoint();
4557       } else {
4558         // Use abstract attribute information.
4559         const AAAlign::StateType &DS = AA.getState();
4560         T ^= DS;
4561       }
4562       return T.isValidState();
4563     };
4564 
4565     for (const auto &VAC : Values) {
4566       if (!VisitValueCB(*VAC.getValue()))
4567         return indicatePessimisticFixpoint();
4568     }
4569 
4570     //  TODO: If we know we visited all incoming values, thus no are assumed
4571     //  dead, we can take the known information from the state T.
4572     return clampStateAndIndicateChange(getState(), T);
4573   }
4574 
4575   /// See AbstractAttribute::trackStatistics()
4576   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4577 };
4578 
4579 /// Align attribute for function return value.
4580 struct AAAlignReturned final
4581     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4582   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4583   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4584 
4585   /// See AbstractAttribute::initialize(...).
4586   void initialize(Attributor &A) override {
4587     Base::initialize(A);
4588     Function *F = getAssociatedFunction();
4589     if (!F || F->isDeclaration())
4590       indicatePessimisticFixpoint();
4591   }
4592 
4593   /// See AbstractAttribute::trackStatistics()
4594   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4595 };
4596 
4597 /// Align attribute for function argument.
4598 struct AAAlignArgument final
4599     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4600   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4601   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4602 
4603   /// See AbstractAttribute::manifest(...).
4604   ChangeStatus manifest(Attributor &A) override {
4605     // If the associated argument is involved in a must-tail call we give up
4606     // because we would need to keep the argument alignments of caller and
4607     // callee in-sync. Just does not seem worth the trouble right now.
4608     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4609       return ChangeStatus::UNCHANGED;
4610     return Base::manifest(A);
4611   }
4612 
4613   /// See AbstractAttribute::trackStatistics()
4614   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4615 };
4616 
4617 struct AAAlignCallSiteArgument final : AAAlignFloating {
4618   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4619       : AAAlignFloating(IRP, A) {}
4620 
4621   /// See AbstractAttribute::manifest(...).
4622   ChangeStatus manifest(Attributor &A) override {
4623     // If the associated argument is involved in a must-tail call we give up
4624     // because we would need to keep the argument alignments of caller and
4625     // callee in-sync. Just does not seem worth the trouble right now.
4626     if (Argument *Arg = getAssociatedArgument())
4627       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4628         return ChangeStatus::UNCHANGED;
4629     ChangeStatus Changed = AAAlignImpl::manifest(A);
4630     Align InheritAlign =
4631         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4632     if (InheritAlign >= getAssumedAlign())
4633       Changed = ChangeStatus::UNCHANGED;
4634     return Changed;
4635   }
4636 
4637   /// See AbstractAttribute::updateImpl(Attributor &A).
4638   ChangeStatus updateImpl(Attributor &A) override {
4639     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4640     if (Argument *Arg = getAssociatedArgument()) {
4641       // We only take known information from the argument
4642       // so we do not need to track a dependence.
4643       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4644           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4645       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4646     }
4647     return Changed;
4648   }
4649 
4650   /// See AbstractAttribute::trackStatistics()
4651   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4652 };
4653 
4654 /// Align attribute deduction for a call site return value.
4655 struct AAAlignCallSiteReturned final
4656     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4657   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4658   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4659       : Base(IRP, A) {}
4660 
4661   /// See AbstractAttribute::initialize(...).
4662   void initialize(Attributor &A) override {
4663     Base::initialize(A);
4664     Function *F = getAssociatedFunction();
4665     if (!F || F->isDeclaration())
4666       indicatePessimisticFixpoint();
4667   }
4668 
4669   /// See AbstractAttribute::trackStatistics()
4670   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4671 };
4672 } // namespace
4673 
4674 /// ------------------ Function No-Return Attribute ----------------------------
4675 namespace {
4676 struct AANoReturnImpl : public AANoReturn {
4677   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4678 
4679   /// See AbstractAttribute::initialize(...).
4680   void initialize(Attributor &A) override {
4681     AANoReturn::initialize(A);
4682     Function *F = getAssociatedFunction();
4683     if (!F || F->isDeclaration())
4684       indicatePessimisticFixpoint();
4685   }
4686 
4687   /// See AbstractAttribute::getAsStr().
4688   const std::string getAsStr() const override {
4689     return getAssumed() ? "noreturn" : "may-return";
4690   }
4691 
4692   /// See AbstractAttribute::updateImpl(Attributor &A).
4693   virtual ChangeStatus updateImpl(Attributor &A) override {
4694     auto CheckForNoReturn = [](Instruction &) { return false; };
4695     bool UsedAssumedInformation = false;
4696     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4697                                    {(unsigned)Instruction::Ret},
4698                                    UsedAssumedInformation))
4699       return indicatePessimisticFixpoint();
4700     return ChangeStatus::UNCHANGED;
4701   }
4702 };
4703 
4704 struct AANoReturnFunction final : AANoReturnImpl {
4705   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4706       : AANoReturnImpl(IRP, A) {}
4707 
4708   /// See AbstractAttribute::trackStatistics()
4709   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4710 };
4711 
4712 /// NoReturn attribute deduction for a call sites.
4713 struct AANoReturnCallSite final : AANoReturnImpl {
4714   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4715       : AANoReturnImpl(IRP, A) {}
4716 
4717   /// See AbstractAttribute::initialize(...).
4718   void initialize(Attributor &A) override {
4719     AANoReturnImpl::initialize(A);
4720     if (Function *F = getAssociatedFunction()) {
4721       const IRPosition &FnPos = IRPosition::function(*F);
4722       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4723       if (!FnAA.isAssumedNoReturn())
4724         indicatePessimisticFixpoint();
4725     }
4726   }
4727 
4728   /// See AbstractAttribute::updateImpl(...).
4729   ChangeStatus updateImpl(Attributor &A) override {
4730     // TODO: Once we have call site specific value information we can provide
4731     //       call site specific liveness information and then it makes
4732     //       sense to specialize attributes for call sites arguments instead of
4733     //       redirecting requests to the callee argument.
4734     Function *F = getAssociatedFunction();
4735     const IRPosition &FnPos = IRPosition::function(*F);
4736     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4737     return clampStateAndIndicateChange(getState(), FnAA.getState());
4738   }
4739 
4740   /// See AbstractAttribute::trackStatistics()
4741   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4742 };
4743 } // namespace
4744 
4745 /// ----------------------- Instance Info ---------------------------------
4746 
4747 namespace {
4748 /// A class to hold the state of for no-capture attributes.
4749 struct AAInstanceInfoImpl : public AAInstanceInfo {
4750   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4751       : AAInstanceInfo(IRP, A) {}
4752 
4753   /// See AbstractAttribute::initialize(...).
4754   void initialize(Attributor &A) override {
4755     Value &V = getAssociatedValue();
4756     if (auto *C = dyn_cast<Constant>(&V)) {
4757       if (C->isThreadDependent())
4758         indicatePessimisticFixpoint();
4759       else
4760         indicateOptimisticFixpoint();
4761       return;
4762     }
4763     if (auto *CB = dyn_cast<CallBase>(&V))
4764       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4765           !CB->mayReadFromMemory()) {
4766         indicateOptimisticFixpoint();
4767         return;
4768       }
4769   }
4770 
4771   /// See AbstractAttribute::updateImpl(...).
4772   ChangeStatus updateImpl(Attributor &A) override {
4773     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4774 
4775     Value &V = getAssociatedValue();
4776     const Function *Scope = nullptr;
4777     if (auto *I = dyn_cast<Instruction>(&V))
4778       Scope = I->getFunction();
4779     if (auto *A = dyn_cast<Argument>(&V)) {
4780       Scope = A->getParent();
4781       if (!Scope->hasLocalLinkage())
4782         return Changed;
4783     }
4784     if (!Scope)
4785       return indicateOptimisticFixpoint();
4786 
4787     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4788         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4789     if (NoRecurseAA.isAssumedNoRecurse())
4790       return Changed;
4791 
4792     auto UsePred = [&](const Use &U, bool &Follow) {
4793       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4794       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4795           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4796         Follow = true;
4797         return true;
4798       }
4799       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4800           (isa<StoreInst>(UserI) &&
4801            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4802         return true;
4803       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4804         // This check is not guaranteeing uniqueness but for now that we cannot
4805         // end up with two versions of \p U thinking it was one.
4806         if (!CB->getCalledFunction() ||
4807             !CB->getCalledFunction()->hasLocalLinkage())
4808           return true;
4809         if (!CB->isArgOperand(&U))
4810           return false;
4811         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4812             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4813             DepClassTy::OPTIONAL);
4814         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4815           return false;
4816         // If this call base might reach the scope again we might forward the
4817         // argument back here. This is very conservative.
4818         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4819           return false;
4820         return true;
4821       }
4822       return false;
4823     };
4824 
4825     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4826       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4827         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4828         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4829           return true;
4830         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4831             *SI->getFunction());
4832         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4833           return true;
4834       }
4835       return false;
4836     };
4837 
4838     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4839                            DepClassTy::OPTIONAL,
4840                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4841       return indicatePessimisticFixpoint();
4842 
4843     return Changed;
4844   }
4845 
4846   /// See AbstractState::getAsStr().
4847   const std::string getAsStr() const override {
4848     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4849   }
4850 
4851   /// See AbstractAttribute::trackStatistics()
4852   void trackStatistics() const override {}
4853 };
4854 
4855 /// InstanceInfo attribute for floating values.
4856 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4857   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4858       : AAInstanceInfoImpl(IRP, A) {}
4859 };
4860 
4861 /// NoCapture attribute for function arguments.
4862 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4863   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4864       : AAInstanceInfoFloating(IRP, A) {}
4865 };
4866 
4867 /// InstanceInfo attribute for call site arguments.
4868 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4869   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4870       : AAInstanceInfoImpl(IRP, A) {}
4871 
4872   /// See AbstractAttribute::updateImpl(...).
4873   ChangeStatus updateImpl(Attributor &A) override {
4874     // TODO: Once we have call site specific value information we can provide
4875     //       call site specific liveness information and then it makes
4876     //       sense to specialize attributes for call sites arguments instead of
4877     //       redirecting requests to the callee argument.
4878     Argument *Arg = getAssociatedArgument();
4879     if (!Arg)
4880       return indicatePessimisticFixpoint();
4881     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4882     auto &ArgAA =
4883         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4884     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4885   }
4886 };
4887 
4888 /// InstanceInfo attribute for function return value.
4889 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4890   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4891       : AAInstanceInfoImpl(IRP, A) {
4892     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4893   }
4894 
4895   /// See AbstractAttribute::initialize(...).
4896   void initialize(Attributor &A) override {
4897     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4898   }
4899 
4900   /// See AbstractAttribute::updateImpl(...).
4901   ChangeStatus updateImpl(Attributor &A) override {
4902     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4903   }
4904 };
4905 
4906 /// InstanceInfo attribute deduction for a call site return value.
4907 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4908   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4909       : AAInstanceInfoFloating(IRP, A) {}
4910 };
4911 } // namespace
4912 
4913 /// ----------------------- Variable Capturing ---------------------------------
4914 
4915 namespace {
4916 /// A class to hold the state of for no-capture attributes.
4917 struct AANoCaptureImpl : public AANoCapture {
4918   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4919 
4920   /// See AbstractAttribute::initialize(...).
4921   void initialize(Attributor &A) override {
4922     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4923       indicateOptimisticFixpoint();
4924       return;
4925     }
4926     Function *AnchorScope = getAnchorScope();
4927     if (isFnInterfaceKind() &&
4928         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4929       indicatePessimisticFixpoint();
4930       return;
4931     }
4932 
4933     // You cannot "capture" null in the default address space.
4934     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4935         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4936       indicateOptimisticFixpoint();
4937       return;
4938     }
4939 
4940     const Function *F =
4941         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4942 
4943     // Check what state the associated function can actually capture.
4944     if (F)
4945       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4946     else
4947       indicatePessimisticFixpoint();
4948   }
4949 
4950   /// See AbstractAttribute::updateImpl(...).
4951   ChangeStatus updateImpl(Attributor &A) override;
4952 
4953   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4954   virtual void
4955   getDeducedAttributes(LLVMContext &Ctx,
4956                        SmallVectorImpl<Attribute> &Attrs) const override {
4957     if (!isAssumedNoCaptureMaybeReturned())
4958       return;
4959 
4960     if (isArgumentPosition()) {
4961       if (isAssumedNoCapture())
4962         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4963       else if (ManifestInternal)
4964         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4965     }
4966   }
4967 
4968   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4969   /// depending on the ability of the function associated with \p IRP to capture
4970   /// state in memory and through "returning/throwing", respectively.
4971   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4972                                                    const Function &F,
4973                                                    BitIntegerState &State) {
4974     // TODO: Once we have memory behavior attributes we should use them here.
4975 
4976     // If we know we cannot communicate or write to memory, we do not care about
4977     // ptr2int anymore.
4978     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4979         F.getReturnType()->isVoidTy()) {
4980       State.addKnownBits(NO_CAPTURE);
4981       return;
4982     }
4983 
4984     // A function cannot capture state in memory if it only reads memory, it can
4985     // however return/throw state and the state might be influenced by the
4986     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4987     if (F.onlyReadsMemory())
4988       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4989 
4990     // A function cannot communicate state back if it does not through
4991     // exceptions and doesn not return values.
4992     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4993       State.addKnownBits(NOT_CAPTURED_IN_RET);
4994 
4995     // Check existing "returned" attributes.
4996     int ArgNo = IRP.getCalleeArgNo();
4997     if (F.doesNotThrow() && ArgNo >= 0) {
4998       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4999         if (F.hasParamAttribute(u, Attribute::Returned)) {
5000           if (u == unsigned(ArgNo))
5001             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
5002           else if (F.onlyReadsMemory())
5003             State.addKnownBits(NO_CAPTURE);
5004           else
5005             State.addKnownBits(NOT_CAPTURED_IN_RET);
5006           break;
5007         }
5008     }
5009   }
5010 
5011   /// See AbstractState::getAsStr().
5012   const std::string getAsStr() const override {
5013     if (isKnownNoCapture())
5014       return "known not-captured";
5015     if (isAssumedNoCapture())
5016       return "assumed not-captured";
5017     if (isKnownNoCaptureMaybeReturned())
5018       return "known not-captured-maybe-returned";
5019     if (isAssumedNoCaptureMaybeReturned())
5020       return "assumed not-captured-maybe-returned";
5021     return "assumed-captured";
5022   }
5023 
5024   /// Check the use \p U and update \p State accordingly. Return true if we
5025   /// should continue to update the state.
5026   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5027                 bool &Follow) {
5028     Instruction *UInst = cast<Instruction>(U.getUser());
5029     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5030                       << *UInst << "\n");
5031 
5032     // Deal with ptr2int by following uses.
5033     if (isa<PtrToIntInst>(UInst)) {
5034       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5035       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5036                           /* Return */ true);
5037     }
5038 
5039     // For stores we already checked if we can follow them, if they make it
5040     // here we give up.
5041     if (isa<StoreInst>(UInst))
5042       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5043                           /* Return */ false);
5044 
5045     // Explicitly catch return instructions.
5046     if (isa<ReturnInst>(UInst)) {
5047       if (UInst->getFunction() == getAnchorScope())
5048         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5049                             /* Return */ true);
5050       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5051                           /* Return */ true);
5052     }
5053 
5054     // For now we only use special logic for call sites. However, the tracker
5055     // itself knows about a lot of other non-capturing cases already.
5056     auto *CB = dyn_cast<CallBase>(UInst);
5057     if (!CB || !CB->isArgOperand(&U))
5058       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5059                           /* Return */ true);
5060 
5061     unsigned ArgNo = CB->getArgOperandNo(&U);
5062     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
5063     // If we have a abstract no-capture attribute for the argument we can use
5064     // it to justify a non-capture attribute here. This allows recursion!
5065     auto &ArgNoCaptureAA =
5066         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5067     if (ArgNoCaptureAA.isAssumedNoCapture())
5068       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5069                           /* Return */ false);
5070     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5071       Follow = true;
5072       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5073                           /* Return */ false);
5074     }
5075 
5076     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5077     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5078                         /* Return */ true);
5079   }
5080 
5081   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5082   /// \p CapturedInRet, then return true if we should continue updating the
5083   /// state.
5084   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5085                            bool CapturedInInt, bool CapturedInRet) {
5086     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5087                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5088     if (CapturedInMem)
5089       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5090     if (CapturedInInt)
5091       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5092     if (CapturedInRet)
5093       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5094     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5095   }
5096 };
5097 
5098 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5099   const IRPosition &IRP = getIRPosition();
5100   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5101                                   : &IRP.getAssociatedValue();
5102   if (!V)
5103     return indicatePessimisticFixpoint();
5104 
5105   const Function *F =
5106       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5107   assert(F && "Expected a function!");
5108   const IRPosition &FnPos = IRPosition::function(*F);
5109 
5110   AANoCapture::StateType T;
5111 
5112   // Readonly means we cannot capture through memory.
5113   bool IsKnown;
5114   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5115     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5116     if (IsKnown)
5117       addKnownBits(NOT_CAPTURED_IN_MEM);
5118   }
5119 
5120   // Make sure all returned values are different than the underlying value.
5121   // TODO: we could do this in a more sophisticated way inside
5122   //       AAReturnedValues, e.g., track all values that escape through returns
5123   //       directly somehow.
5124   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5125     if (!RVAA.getState().isValidState())
5126       return false;
5127     bool SeenConstant = false;
5128     for (auto &It : RVAA.returned_values()) {
5129       if (isa<Constant>(It.first)) {
5130         if (SeenConstant)
5131           return false;
5132         SeenConstant = true;
5133       } else if (!isa<Argument>(It.first) ||
5134                  It.first == getAssociatedArgument())
5135         return false;
5136     }
5137     return true;
5138   };
5139 
5140   const auto &NoUnwindAA =
5141       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5142   if (NoUnwindAA.isAssumedNoUnwind()) {
5143     bool IsVoidTy = F->getReturnType()->isVoidTy();
5144     const AAReturnedValues *RVAA =
5145         IsVoidTy ? nullptr
5146                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
5147 
5148                                                  DepClassTy::OPTIONAL);
5149     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5150       T.addKnownBits(NOT_CAPTURED_IN_RET);
5151       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5152         return ChangeStatus::UNCHANGED;
5153       if (NoUnwindAA.isKnownNoUnwind() &&
5154           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5155         addKnownBits(NOT_CAPTURED_IN_RET);
5156         if (isKnown(NOT_CAPTURED_IN_MEM))
5157           return indicateOptimisticFixpoint();
5158       }
5159     }
5160   }
5161 
5162   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5163     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5164         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5165     return DerefAA.getAssumedDereferenceableBytes();
5166   };
5167 
5168   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5169     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5170     case UseCaptureKind::NO_CAPTURE:
5171       return true;
5172     case UseCaptureKind::MAY_CAPTURE:
5173       return checkUse(A, T, U, Follow);
5174     case UseCaptureKind::PASSTHROUGH:
5175       Follow = true;
5176       return true;
5177     }
5178     llvm_unreachable("Unexpected use capture kind!");
5179   };
5180 
5181   if (!A.checkForAllUses(UseCheck, *this, *V))
5182     return indicatePessimisticFixpoint();
5183 
5184   AANoCapture::StateType &S = getState();
5185   auto Assumed = S.getAssumed();
5186   S.intersectAssumedBits(T.getAssumed());
5187   if (!isAssumedNoCaptureMaybeReturned())
5188     return indicatePessimisticFixpoint();
5189   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5190                                    : ChangeStatus::CHANGED;
5191 }
5192 
5193 /// NoCapture attribute for function arguments.
5194 struct AANoCaptureArgument final : AANoCaptureImpl {
5195   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5196       : AANoCaptureImpl(IRP, A) {}
5197 
5198   /// See AbstractAttribute::trackStatistics()
5199   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5200 };
5201 
5202 /// NoCapture attribute for call site arguments.
5203 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5204   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5205       : AANoCaptureImpl(IRP, A) {}
5206 
5207   /// See AbstractAttribute::initialize(...).
5208   void initialize(Attributor &A) override {
5209     if (Argument *Arg = getAssociatedArgument())
5210       if (Arg->hasByValAttr())
5211         indicateOptimisticFixpoint();
5212     AANoCaptureImpl::initialize(A);
5213   }
5214 
5215   /// See AbstractAttribute::updateImpl(...).
5216   ChangeStatus updateImpl(Attributor &A) override {
5217     // TODO: Once we have call site specific value information we can provide
5218     //       call site specific liveness information and then it makes
5219     //       sense to specialize attributes for call sites arguments instead of
5220     //       redirecting requests to the callee argument.
5221     Argument *Arg = getAssociatedArgument();
5222     if (!Arg)
5223       return indicatePessimisticFixpoint();
5224     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5225     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5226     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5227   }
5228 
5229   /// See AbstractAttribute::trackStatistics()
5230   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5231 };
5232 
5233 /// NoCapture attribute for floating values.
5234 struct AANoCaptureFloating final : AANoCaptureImpl {
5235   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5236       : AANoCaptureImpl(IRP, A) {}
5237 
5238   /// See AbstractAttribute::trackStatistics()
5239   void trackStatistics() const override {
5240     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5241   }
5242 };
5243 
5244 /// NoCapture attribute for function return value.
5245 struct AANoCaptureReturned final : AANoCaptureImpl {
5246   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5247       : AANoCaptureImpl(IRP, A) {
5248     llvm_unreachable("NoCapture is not applicable to function returns!");
5249   }
5250 
5251   /// See AbstractAttribute::initialize(...).
5252   void initialize(Attributor &A) override {
5253     llvm_unreachable("NoCapture is not applicable to function returns!");
5254   }
5255 
5256   /// See AbstractAttribute::updateImpl(...).
5257   ChangeStatus updateImpl(Attributor &A) override {
5258     llvm_unreachable("NoCapture is not applicable to function returns!");
5259   }
5260 
5261   /// See AbstractAttribute::trackStatistics()
5262   void trackStatistics() const override {}
5263 };
5264 
5265 /// NoCapture attribute deduction for a call site return value.
5266 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5267   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5268       : AANoCaptureImpl(IRP, A) {}
5269 
5270   /// See AbstractAttribute::initialize(...).
5271   void initialize(Attributor &A) override {
5272     const Function *F = getAnchorScope();
5273     // Check what state the associated function can actually capture.
5274     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5275   }
5276 
5277   /// See AbstractAttribute::trackStatistics()
5278   void trackStatistics() const override {
5279     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5280   }
5281 };
5282 } // namespace
5283 
5284 /// ------------------ Value Simplify Attribute ----------------------------
5285 
5286 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5287   // FIXME: Add a typecast support.
5288   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5289       SimplifiedAssociatedValue, Other, Ty);
5290   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5291     return false;
5292 
5293   LLVM_DEBUG({
5294     if (SimplifiedAssociatedValue)
5295       dbgs() << "[ValueSimplify] is assumed to be "
5296              << **SimplifiedAssociatedValue << "\n";
5297     else
5298       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5299   });
5300   return true;
5301 }
5302 
5303 namespace {
5304 struct AAValueSimplifyImpl : AAValueSimplify {
5305   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5306       : AAValueSimplify(IRP, A) {}
5307 
5308   /// See AbstractAttribute::initialize(...).
5309   void initialize(Attributor &A) override {
5310     if (getAssociatedValue().getType()->isVoidTy())
5311       indicatePessimisticFixpoint();
5312     if (A.hasSimplificationCallback(getIRPosition()))
5313       indicatePessimisticFixpoint();
5314   }
5315 
5316   /// See AbstractAttribute::getAsStr().
5317   const std::string getAsStr() const override {
5318     LLVM_DEBUG({
5319       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5320       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5321         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5322     });
5323     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5324                           : "not-simple";
5325   }
5326 
5327   /// See AbstractAttribute::trackStatistics()
5328   void trackStatistics() const override {}
5329 
5330   /// See AAValueSimplify::getAssumedSimplifiedValue()
5331   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5332     return SimplifiedAssociatedValue;
5333   }
5334 
5335   /// Ensure the return value is \p V with type \p Ty, if not possible return
5336   /// nullptr. If \p Check is true we will only verify such an operation would
5337   /// suceed and return a non-nullptr value if that is the case. No IR is
5338   /// generated or modified.
5339   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5340                            bool Check) {
5341     if (auto *TypedV = AA::getWithType(V, Ty))
5342       return TypedV;
5343     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5344       return Check ? &V
5345                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5346                                                                       "", CtxI);
5347     return nullptr;
5348   }
5349 
5350   /// Reproduce \p I with type \p Ty or return nullptr if that is not posisble.
5351   /// If \p Check is true we will only verify such an operation would suceed and
5352   /// return a non-nullptr value if that is the case. No IR is generated or
5353   /// modified.
5354   static Value *reproduceInst(Attributor &A,
5355                               const AbstractAttribute &QueryingAA,
5356                               Instruction &I, Type &Ty, Instruction *CtxI,
5357                               bool Check, ValueToValueMapTy &VMap) {
5358     assert(CtxI && "Cannot reproduce an instruction without context!");
5359     if (Check && (I.mayReadFromMemory() ||
5360                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5361                                                 /* TLI */ nullptr)))
5362       return nullptr;
5363     for (Value *Op : I.operands()) {
5364       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5365       if (!NewOp) {
5366         assert(Check && "Manifest of new value unexpectedly failed!");
5367         return nullptr;
5368       }
5369       if (!Check)
5370         VMap[Op] = NewOp;
5371     }
5372     if (Check)
5373       return &I;
5374 
5375     Instruction *CloneI = I.clone();
5376     // TODO: Try to salvage debug information here.
5377     CloneI->setDebugLoc(DebugLoc());
5378     VMap[&I] = CloneI;
5379     CloneI->insertBefore(CtxI);
5380     RemapInstruction(CloneI, VMap);
5381     return CloneI;
5382   }
5383 
5384   /// Reproduce \p V with type \p Ty or return nullptr if that is not posisble.
5385   /// If \p Check is true we will only verify such an operation would suceed and
5386   /// return a non-nullptr value if that is the case. No IR is generated or
5387   /// modified.
5388   static Value *reproduceValue(Attributor &A,
5389                                const AbstractAttribute &QueryingAA, Value &V,
5390                                Type &Ty, Instruction *CtxI, bool Check,
5391                                ValueToValueMapTy &VMap) {
5392     if (const auto &NewV = VMap.lookup(&V))
5393       return NewV;
5394     bool UsedAssumedInformation = false;
5395     Optional<Value *> SimpleV = A.getAssumedSimplified(
5396         V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5397     if (!SimpleV.has_value())
5398       return PoisonValue::get(&Ty);
5399     Value *EffectiveV = &V;
5400     if (SimpleV.value())
5401       EffectiveV = SimpleV.value();
5402     if (auto *C = dyn_cast<Constant>(EffectiveV))
5403       return C;
5404     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5405                                       A.getInfoCache()))
5406       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5407     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5408       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5409         return ensureType(A, *NewV, Ty, CtxI, Check);
5410     return nullptr;
5411   }
5412 
5413   /// Return a value we can use as replacement for the associated one, or
5414   /// nullptr if we don't have one that makes sense.
5415   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5416     Value *NewV = SimplifiedAssociatedValue
5417                       ? SimplifiedAssociatedValue.value()
5418                       : UndefValue::get(getAssociatedType());
5419     if (NewV && NewV != &getAssociatedValue()) {
5420       ValueToValueMapTy VMap;
5421       // First verify we can reprduce the value with the required type at the
5422       // context location before we actually start modifying the IR.
5423       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5424                          /* CheckOnly */ true, VMap))
5425         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5426                               /* CheckOnly */ false, VMap);
5427     }
5428     return nullptr;
5429   }
5430 
5431   /// Helper function for querying AAValueSimplify and updating candicate.
5432   /// \param IRP The value position we are trying to unify with SimplifiedValue
5433   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5434                       const IRPosition &IRP, bool Simplify = true) {
5435     bool UsedAssumedInformation = false;
5436     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5437     if (Simplify)
5438       QueryingValueSimplified = A.getAssumedSimplified(
5439           IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5440     return unionAssumed(QueryingValueSimplified);
5441   }
5442 
5443   /// Returns a candidate is found or not
5444   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5445     if (!getAssociatedValue().getType()->isIntegerTy())
5446       return false;
5447 
5448     // This will also pass the call base context.
5449     const auto &AA =
5450         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5451 
5452     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5453 
5454     if (!COpt) {
5455       SimplifiedAssociatedValue = llvm::None;
5456       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5457       return true;
5458     }
5459     if (auto *C = *COpt) {
5460       SimplifiedAssociatedValue = C;
5461       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5462       return true;
5463     }
5464     return false;
5465   }
5466 
5467   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5468     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5469       return true;
5470     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5471       return true;
5472     return false;
5473   }
5474 
5475   /// See AbstractAttribute::manifest(...).
5476   ChangeStatus manifest(Attributor &A) override {
5477     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5478     for (auto &U : getAssociatedValue().uses()) {
5479       // Check if we need to adjust the insertion point to make sure the IR is
5480       // valid.
5481       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5482       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5483         IP = PHI->getIncomingBlock(U)->getTerminator();
5484       if (auto *NewV = manifestReplacementValue(A, IP)) {
5485         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5486                           << " -> " << *NewV << " :: " << *this << "\n");
5487         if (A.changeUseAfterManifest(U, *NewV))
5488           Changed = ChangeStatus::CHANGED;
5489       }
5490     }
5491 
5492     return Changed | AAValueSimplify::manifest(A);
5493   }
5494 
5495   /// See AbstractState::indicatePessimisticFixpoint(...).
5496   ChangeStatus indicatePessimisticFixpoint() override {
5497     SimplifiedAssociatedValue = &getAssociatedValue();
5498     return AAValueSimplify::indicatePessimisticFixpoint();
5499   }
5500 };
5501 
5502 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5503   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5504       : AAValueSimplifyImpl(IRP, A) {}
5505 
5506   void initialize(Attributor &A) override {
5507     AAValueSimplifyImpl::initialize(A);
5508     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5509       indicatePessimisticFixpoint();
5510     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5511                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5512                 /* IgnoreSubsumingPositions */ true))
5513       indicatePessimisticFixpoint();
5514   }
5515 
5516   /// See AbstractAttribute::updateImpl(...).
5517   ChangeStatus updateImpl(Attributor &A) override {
5518     // Byval is only replacable if it is readonly otherwise we would write into
5519     // the replaced value and not the copy that byval creates implicitly.
5520     Argument *Arg = getAssociatedArgument();
5521     if (Arg->hasByValAttr()) {
5522       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5523       //       there is no race by not copying a constant byval.
5524       bool IsKnown;
5525       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5526         return indicatePessimisticFixpoint();
5527     }
5528 
5529     auto Before = SimplifiedAssociatedValue;
5530 
5531     auto PredForCallSite = [&](AbstractCallSite ACS) {
5532       const IRPosition &ACSArgPos =
5533           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5534       // Check if a coresponding argument was found or if it is on not
5535       // associated (which can happen for callback calls).
5536       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5537         return false;
5538 
5539       // Simplify the argument operand explicitly and check if the result is
5540       // valid in the current scope. This avoids refering to simplified values
5541       // in other functions, e.g., we don't want to say a an argument in a
5542       // static function is actually an argument in a different function.
5543       bool UsedAssumedInformation = false;
5544       Optional<Constant *> SimpleArgOp =
5545           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5546       if (!SimpleArgOp)
5547         return true;
5548       if (!SimpleArgOp.value())
5549         return false;
5550       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5551         return false;
5552       return unionAssumed(*SimpleArgOp);
5553     };
5554 
5555     // Generate a answer specific to a call site context.
5556     bool Success;
5557     bool UsedAssumedInformation = false;
5558     if (hasCallBaseContext() &&
5559         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5560       Success = PredForCallSite(
5561           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5562     else
5563       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5564                                        UsedAssumedInformation);
5565 
5566     if (!Success)
5567       if (!askSimplifiedValueForOtherAAs(A))
5568         return indicatePessimisticFixpoint();
5569 
5570     // If a candicate was found in this update, return CHANGED.
5571     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5572                                                : ChangeStatus ::CHANGED;
5573   }
5574 
5575   /// See AbstractAttribute::trackStatistics()
5576   void trackStatistics() const override {
5577     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5578   }
5579 };
5580 
5581 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5582   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5583       : AAValueSimplifyImpl(IRP, A) {}
5584 
5585   /// See AAValueSimplify::getAssumedSimplifiedValue()
5586   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5587     if (!isValidState())
5588       return nullptr;
5589     return SimplifiedAssociatedValue;
5590   }
5591 
5592   /// See AbstractAttribute::updateImpl(...).
5593   ChangeStatus updateImpl(Attributor &A) override {
5594     auto Before = SimplifiedAssociatedValue;
5595 
5596     auto ReturnInstCB = [&](Instruction &I) {
5597       auto &RI = cast<ReturnInst>(I);
5598       return checkAndUpdate(
5599           A, *this,
5600           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5601     };
5602 
5603     bool UsedAssumedInformation = false;
5604     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5605                                    UsedAssumedInformation))
5606       if (!askSimplifiedValueForOtherAAs(A))
5607         return indicatePessimisticFixpoint();
5608 
5609     // If a candicate was found in this update, return CHANGED.
5610     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5611                                                : ChangeStatus ::CHANGED;
5612   }
5613 
5614   ChangeStatus manifest(Attributor &A) override {
5615     // We queried AAValueSimplify for the returned values so they will be
5616     // replaced if a simplified form was found. Nothing to do here.
5617     return ChangeStatus::UNCHANGED;
5618   }
5619 
5620   /// See AbstractAttribute::trackStatistics()
5621   void trackStatistics() const override {
5622     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5623   }
5624 };
5625 
5626 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5627   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5628       : AAValueSimplifyImpl(IRP, A) {}
5629 
5630   /// See AbstractAttribute::initialize(...).
5631   void initialize(Attributor &A) override {
5632     AAValueSimplifyImpl::initialize(A);
5633     Value &V = getAnchorValue();
5634 
5635     // TODO: add other stuffs
5636     if (isa<Constant>(V))
5637       indicatePessimisticFixpoint();
5638   }
5639 
5640   /// See AbstractAttribute::updateImpl(...).
5641   ChangeStatus updateImpl(Attributor &A) override {
5642     auto Before = SimplifiedAssociatedValue;
5643     if (!askSimplifiedValueForOtherAAs(A))
5644       return indicatePessimisticFixpoint();
5645 
5646     // If a candicate was found in this update, return CHANGED.
5647     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5648                                                : ChangeStatus ::CHANGED;
5649   }
5650 
5651   /// See AbstractAttribute::trackStatistics()
5652   void trackStatistics() const override {
5653     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5654   }
5655 };
5656 
5657 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5658   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5659       : AAValueSimplifyImpl(IRP, A) {}
5660 
5661   /// See AbstractAttribute::initialize(...).
5662   void initialize(Attributor &A) override {
5663     SimplifiedAssociatedValue = nullptr;
5664     indicateOptimisticFixpoint();
5665   }
5666   /// See AbstractAttribute::initialize(...).
5667   ChangeStatus updateImpl(Attributor &A) override {
5668     llvm_unreachable(
5669         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5670   }
5671   /// See AbstractAttribute::trackStatistics()
5672   void trackStatistics() const override {
5673     STATS_DECLTRACK_FN_ATTR(value_simplify)
5674   }
5675 };
5676 
5677 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5678   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5679       : AAValueSimplifyFunction(IRP, A) {}
5680   /// See AbstractAttribute::trackStatistics()
5681   void trackStatistics() const override {
5682     STATS_DECLTRACK_CS_ATTR(value_simplify)
5683   }
5684 };
5685 
5686 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5687   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5688       : AAValueSimplifyImpl(IRP, A) {}
5689 
5690   void initialize(Attributor &A) override {
5691     AAValueSimplifyImpl::initialize(A);
5692     Function *Fn = getAssociatedFunction();
5693     if (!Fn) {
5694       indicatePessimisticFixpoint();
5695       return;
5696     }
5697     for (Argument &Arg : Fn->args()) {
5698       if (Arg.hasReturnedAttr()) {
5699         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5700                                                  Arg.getArgNo());
5701         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5702             checkAndUpdate(A, *this, IRP))
5703           indicateOptimisticFixpoint();
5704         else
5705           indicatePessimisticFixpoint();
5706         return;
5707       }
5708     }
5709   }
5710 
5711   /// See AbstractAttribute::updateImpl(...).
5712   ChangeStatus updateImpl(Attributor &A) override {
5713     auto Before = SimplifiedAssociatedValue;
5714     auto &RetAA = A.getAAFor<AAReturnedValues>(
5715         *this, IRPosition::function(*getAssociatedFunction()),
5716         DepClassTy::REQUIRED);
5717     auto PredForReturned =
5718         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5719           bool UsedAssumedInformation = false;
5720           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5721               &RetVal, *cast<CallBase>(getCtxI()), *this,
5722               UsedAssumedInformation);
5723           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5724               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5725           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5726         };
5727     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5728       if (!askSimplifiedValueForOtherAAs(A))
5729         return indicatePessimisticFixpoint();
5730     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5731                                                : ChangeStatus ::CHANGED;
5732   }
5733 
5734   void trackStatistics() const override {
5735     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5736   }
5737 };
5738 
5739 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5740   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5741       : AAValueSimplifyFloating(IRP, A) {}
5742 
5743   /// See AbstractAttribute::manifest(...).
5744   ChangeStatus manifest(Attributor &A) override {
5745     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5746     // TODO: We should avoid simplification duplication to begin with.
5747     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
5748         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
5749     if (FloatAA && FloatAA->getState().isValidState())
5750       return Changed;
5751 
5752     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
5753       Use &U = cast<CallBase>(&getAnchorValue())
5754                    ->getArgOperandUse(getCallSiteArgNo());
5755       if (A.changeUseAfterManifest(U, *NewV))
5756         Changed = ChangeStatus::CHANGED;
5757     }
5758 
5759     return Changed | AAValueSimplify::manifest(A);
5760   }
5761 
5762   void trackStatistics() const override {
5763     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5764   }
5765 };
5766 } // namespace
5767 
5768 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5769 namespace {
5770 struct AAHeapToStackFunction final : public AAHeapToStack {
5771 
5772   struct AllocationInfo {
5773     /// The call that allocates the memory.
5774     CallBase *const CB;
5775 
5776     /// The library function id for the allocation.
5777     LibFunc LibraryFunctionId = NotLibFunc;
5778 
5779     /// The status wrt. a rewrite.
5780     enum {
5781       STACK_DUE_TO_USE,
5782       STACK_DUE_TO_FREE,
5783       INVALID,
5784     } Status = STACK_DUE_TO_USE;
5785 
5786     /// Flag to indicate if we encountered a use that might free this allocation
5787     /// but which is not in the deallocation infos.
5788     bool HasPotentiallyFreeingUnknownUses = false;
5789 
5790     /// Flag to indicate that we should place the new alloca in the function
5791     /// entry block rather than where the call site (CB) is.
5792     bool MoveAllocaIntoEntry = true;
5793 
5794     /// The set of free calls that use this allocation.
5795     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
5796   };
5797 
5798   struct DeallocationInfo {
5799     /// The call that deallocates the memory.
5800     CallBase *const CB;
5801     /// The value freed by the call.
5802     Value *FreedOp;
5803 
5804     /// Flag to indicate if we don't know all objects this deallocation might
5805     /// free.
5806     bool MightFreeUnknownObjects = false;
5807 
5808     /// The set of allocation calls that are potentially freed.
5809     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
5810   };
5811 
5812   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5813       : AAHeapToStack(IRP, A) {}
5814 
5815   ~AAHeapToStackFunction() {
5816     // Ensure we call the destructor so we release any memory allocated in the
5817     // sets.
5818     for (auto &It : AllocationInfos)
5819       It.second->~AllocationInfo();
5820     for (auto &It : DeallocationInfos)
5821       It.second->~DeallocationInfo();
5822   }
5823 
5824   void initialize(Attributor &A) override {
5825     AAHeapToStack::initialize(A);
5826 
5827     const Function *F = getAnchorScope();
5828     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5829 
5830     auto AllocationIdentifierCB = [&](Instruction &I) {
5831       CallBase *CB = dyn_cast<CallBase>(&I);
5832       if (!CB)
5833         return true;
5834       if (Value *FreedOp = getFreedOperand(CB, TLI)) {
5835         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB, FreedOp};
5836         return true;
5837       }
5838       // To do heap to stack, we need to know that the allocation itself is
5839       // removable once uses are rewritten, and that we can initialize the
5840       // alloca to the same pattern as the original allocation result.
5841       if (isRemovableAlloc(CB, TLI)) {
5842         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
5843         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
5844           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
5845           AllocationInfos[CB] = AI;
5846           if (TLI)
5847             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5848         }
5849       }
5850       return true;
5851     };
5852 
5853     bool UsedAssumedInformation = false;
5854     bool Success = A.checkForAllCallLikeInstructions(
5855         AllocationIdentifierCB, *this, UsedAssumedInformation,
5856         /* CheckBBLivenessOnly */ false,
5857         /* CheckPotentiallyDead */ true);
5858     (void)Success;
5859     assert(Success && "Did not expect the call base visit callback to fail!");
5860 
5861     Attributor::SimplifictionCallbackTy SCB =
5862         [](const IRPosition &, const AbstractAttribute *,
5863            bool &) -> Optional<Value *> { return nullptr; };
5864     for (const auto &It : AllocationInfos)
5865       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5866                                        SCB);
5867     for (const auto &It : DeallocationInfos)
5868       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5869                                        SCB);
5870   }
5871 
5872   const std::string getAsStr() const override {
5873     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5874     for (const auto &It : AllocationInfos) {
5875       if (It.second->Status == AllocationInfo::INVALID)
5876         ++NumInvalidMallocs;
5877       else
5878         ++NumH2SMallocs;
5879     }
5880     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5881            std::to_string(NumInvalidMallocs);
5882   }
5883 
5884   /// See AbstractAttribute::trackStatistics().
5885   void trackStatistics() const override {
5886     STATS_DECL(
5887         MallocCalls, Function,
5888         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5889     for (auto &It : AllocationInfos)
5890       if (It.second->Status != AllocationInfo::INVALID)
5891         ++BUILD_STAT_NAME(MallocCalls, Function);
5892   }
5893 
5894   bool isAssumedHeapToStack(const CallBase &CB) const override {
5895     if (isValidState())
5896       if (AllocationInfo *AI =
5897               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
5898         return AI->Status != AllocationInfo::INVALID;
5899     return false;
5900   }
5901 
5902   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5903     if (!isValidState())
5904       return false;
5905 
5906     for (auto &It : AllocationInfos) {
5907       AllocationInfo &AI = *It.second;
5908       if (AI.Status == AllocationInfo::INVALID)
5909         continue;
5910 
5911       if (AI.PotentialFreeCalls.count(&CB))
5912         return true;
5913     }
5914 
5915     return false;
5916   }
5917 
5918   ChangeStatus manifest(Attributor &A) override {
5919     assert(getState().isValidState() &&
5920            "Attempted to manifest an invalid state!");
5921 
5922     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5923     Function *F = getAnchorScope();
5924     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5925 
5926     for (auto &It : AllocationInfos) {
5927       AllocationInfo &AI = *It.second;
5928       if (AI.Status == AllocationInfo::INVALID)
5929         continue;
5930 
5931       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5932         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5933         A.deleteAfterManifest(*FreeCall);
5934         HasChanged = ChangeStatus::CHANGED;
5935       }
5936 
5937       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5938                         << "\n");
5939 
5940       auto Remark = [&](OptimizationRemark OR) {
5941         LibFunc IsAllocShared;
5942         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5943           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5944             return OR << "Moving globalized variable to the stack.";
5945         return OR << "Moving memory allocation from the heap to the stack.";
5946       };
5947       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5948         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5949       else
5950         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5951 
5952       const DataLayout &DL = A.getInfoCache().getDL();
5953       Value *Size;
5954       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5955       if (SizeAPI) {
5956         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5957       } else {
5958         LLVMContext &Ctx = AI.CB->getContext();
5959         ObjectSizeOpts Opts;
5960         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
5961         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
5962         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
5963                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
5964         Size = SizeOffsetPair.first;
5965       }
5966 
5967       Instruction *IP =
5968           AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB;
5969 
5970       Align Alignment(1);
5971       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
5972         Alignment = std::max(Alignment, *RetAlign);
5973       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
5974         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
5975         assert(AlignmentAPI && AlignmentAPI.value().getZExtValue() > 0 &&
5976                "Expected an alignment during manifest!");
5977         Alignment = std::max(
5978             Alignment, assumeAligned(AlignmentAPI.value().getZExtValue()));
5979       }
5980 
5981       // TODO: Hoist the alloca towards the function entry.
5982       unsigned AS = DL.getAllocaAddrSpace();
5983       Instruction *Alloca =
5984           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5985                          AI.CB->getName() + ".h2s", IP);
5986 
5987       if (Alloca->getType() != AI.CB->getType())
5988         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
5989             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
5990 
5991       auto *I8Ty = Type::getInt8Ty(F->getContext());
5992       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
5993       assert(InitVal &&
5994              "Must be able to materialize initial memory state of allocation");
5995 
5996       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
5997 
5998       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5999         auto *NBB = II->getNormalDest();
6000         BranchInst::Create(NBB, AI.CB->getParent());
6001         A.deleteAfterManifest(*AI.CB);
6002       } else {
6003         A.deleteAfterManifest(*AI.CB);
6004       }
6005 
6006       // Initialize the alloca with the same value as used by the allocation
6007       // function.  We can skip undef as the initial value of an alloc is
6008       // undef, and the memset would simply end up being DSEd.
6009       if (!isa<UndefValue>(InitVal)) {
6010         IRBuilder<> Builder(Alloca->getNextNode());
6011         // TODO: Use alignment above if align!=1
6012         Builder.CreateMemSet(Alloca, InitVal, Size, None);
6013       }
6014       HasChanged = ChangeStatus::CHANGED;
6015     }
6016 
6017     return HasChanged;
6018   }
6019 
6020   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
6021                            Value &V) {
6022     bool UsedAssumedInformation = false;
6023     Optional<Constant *> SimpleV =
6024         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6025     if (!SimpleV)
6026       return APInt(64, 0);
6027     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.value()))
6028       return CI->getValue();
6029     return llvm::None;
6030   }
6031 
6032   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6033                           AllocationInfo &AI) {
6034     auto Mapper = [&](const Value *V) -> const Value * {
6035       bool UsedAssumedInformation = false;
6036       if (Optional<Constant *> SimpleV =
6037               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6038         if (*SimpleV)
6039           return *SimpleV;
6040       return V;
6041     };
6042 
6043     const Function *F = getAnchorScope();
6044     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6045     return getAllocSize(AI.CB, TLI, Mapper);
6046   }
6047 
6048   /// Collection of all malloc-like calls in a function with associated
6049   /// information.
6050   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6051 
6052   /// Collection of all free-like calls in a function with associated
6053   /// information.
6054   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6055 
6056   ChangeStatus updateImpl(Attributor &A) override;
6057 };
6058 
6059 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6060   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6061   const Function *F = getAnchorScope();
6062   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6063 
6064   const auto &LivenessAA =
6065       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6066 
6067   MustBeExecutedContextExplorer &Explorer =
6068       A.getInfoCache().getMustBeExecutedContextExplorer();
6069 
6070   bool StackIsAccessibleByOtherThreads =
6071       A.getInfoCache().stackIsAccessibleByOtherThreads();
6072 
6073   LoopInfo *LI =
6074       A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6075   Optional<bool> MayContainIrreducibleControl;
6076   auto IsInLoop = [&](BasicBlock &BB) {
6077     if (&F->getEntryBlock() == &BB)
6078       return false;
6079     if (!MayContainIrreducibleControl.has_value())
6080       MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6081     if (MayContainIrreducibleControl.value())
6082       return true;
6083     if (!LI)
6084       return true;
6085     return LI->getLoopFor(&BB) != nullptr;
6086   };
6087 
6088   // Flag to ensure we update our deallocation information at most once per
6089   // updateImpl call and only if we use the free check reasoning.
6090   bool HasUpdatedFrees = false;
6091 
6092   auto UpdateFrees = [&]() {
6093     HasUpdatedFrees = true;
6094 
6095     for (auto &It : DeallocationInfos) {
6096       DeallocationInfo &DI = *It.second;
6097       // For now we cannot use deallocations that have unknown inputs, skip
6098       // them.
6099       if (DI.MightFreeUnknownObjects)
6100         continue;
6101 
6102       // No need to analyze dead calls, ignore them instead.
6103       bool UsedAssumedInformation = false;
6104       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6105                           /* CheckBBLivenessOnly */ true))
6106         continue;
6107 
6108       // Use the non-optimistic version to get the freed object.
6109       Value *Obj = getUnderlyingObject(DI.FreedOp);
6110       if (!Obj) {
6111         LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6112         DI.MightFreeUnknownObjects = true;
6113         continue;
6114       }
6115 
6116       // Free of null and undef can be ignored as no-ops (or UB in the latter
6117       // case).
6118       if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6119         continue;
6120 
6121       CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6122       if (!ObjCB) {
6123         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6124                           << "\n");
6125         DI.MightFreeUnknownObjects = true;
6126         continue;
6127       }
6128 
6129       AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6130       if (!AI) {
6131         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6132                           << "\n");
6133         DI.MightFreeUnknownObjects = true;
6134         continue;
6135       }
6136 
6137       DI.PotentialAllocationCalls.insert(ObjCB);
6138     }
6139   };
6140 
6141   auto FreeCheck = [&](AllocationInfo &AI) {
6142     // If the stack is not accessible by other threads, the "must-free" logic
6143     // doesn't apply as the pointer could be shared and needs to be places in
6144     // "shareable" memory.
6145     if (!StackIsAccessibleByOtherThreads) {
6146       auto &NoSyncAA =
6147           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6148       if (!NoSyncAA.isAssumedNoSync()) {
6149         LLVM_DEBUG(
6150             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6151                       "other threads and function is not nosync:\n");
6152         return false;
6153       }
6154     }
6155     if (!HasUpdatedFrees)
6156       UpdateFrees();
6157 
6158     // TODO: Allow multi exit functions that have different free calls.
6159     if (AI.PotentialFreeCalls.size() != 1) {
6160       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6161                         << AI.PotentialFreeCalls.size() << "\n");
6162       return false;
6163     }
6164     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6165     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6166     if (!DI) {
6167       LLVM_DEBUG(
6168           dbgs() << "[H2S] unique free call was not known as deallocation call "
6169                  << *UniqueFree << "\n");
6170       return false;
6171     }
6172     if (DI->MightFreeUnknownObjects) {
6173       LLVM_DEBUG(
6174           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6175       return false;
6176     }
6177     if (DI->PotentialAllocationCalls.empty())
6178       return true;
6179     if (DI->PotentialAllocationCalls.size() > 1) {
6180       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6181                         << DI->PotentialAllocationCalls.size()
6182                         << " different allocations\n");
6183       return false;
6184     }
6185     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6186       LLVM_DEBUG(
6187           dbgs()
6188           << "[H2S] unique free call not known to free this allocation but "
6189           << **DI->PotentialAllocationCalls.begin() << "\n");
6190       return false;
6191     }
6192     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6193     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6194       LLVM_DEBUG(
6195           dbgs()
6196           << "[H2S] unique free call might not be executed with the allocation "
6197           << *UniqueFree << "\n");
6198       return false;
6199     }
6200     return true;
6201   };
6202 
6203   auto UsesCheck = [&](AllocationInfo &AI) {
6204     bool ValidUsesOnly = true;
6205 
6206     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6207       Instruction *UserI = cast<Instruction>(U.getUser());
6208       if (isa<LoadInst>(UserI))
6209         return true;
6210       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6211         if (SI->getValueOperand() == U.get()) {
6212           LLVM_DEBUG(dbgs()
6213                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6214           ValidUsesOnly = false;
6215         } else {
6216           // A store into the malloc'ed memory is fine.
6217         }
6218         return true;
6219       }
6220       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6221         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6222           return true;
6223         if (DeallocationInfos.count(CB)) {
6224           AI.PotentialFreeCalls.insert(CB);
6225           return true;
6226         }
6227 
6228         unsigned ArgNo = CB->getArgOperandNo(&U);
6229 
6230         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6231             *this, IRPosition::callsite_argument(*CB, ArgNo),
6232             DepClassTy::OPTIONAL);
6233 
6234         // If a call site argument use is nofree, we are fine.
6235         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6236             *this, IRPosition::callsite_argument(*CB, ArgNo),
6237             DepClassTy::OPTIONAL);
6238 
6239         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6240         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6241         if (MaybeCaptured ||
6242             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6243              MaybeFreed)) {
6244           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6245 
6246           // Emit a missed remark if this is missed OpenMP globalization.
6247           auto Remark = [&](OptimizationRemarkMissed ORM) {
6248             return ORM
6249                    << "Could not move globalized variable to the stack. "
6250                       "Variable is potentially captured in call. Mark "
6251                       "parameter as `__attribute__((noescape))` to override.";
6252           };
6253 
6254           if (ValidUsesOnly &&
6255               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6256             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6257 
6258           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6259           ValidUsesOnly = false;
6260         }
6261         return true;
6262       }
6263 
6264       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6265           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6266         Follow = true;
6267         return true;
6268       }
6269       // Unknown user for which we can not track uses further (in a way that
6270       // makes sense).
6271       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6272       ValidUsesOnly = false;
6273       return true;
6274     };
6275     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6276       return false;
6277     return ValidUsesOnly;
6278   };
6279 
6280   // The actual update starts here. We look at all allocations and depending on
6281   // their status perform the appropriate check(s).
6282   for (auto &It : AllocationInfos) {
6283     AllocationInfo &AI = *It.second;
6284     if (AI.Status == AllocationInfo::INVALID)
6285       continue;
6286 
6287     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6288       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6289       if (!APAlign) {
6290         // Can't generate an alloca which respects the required alignment
6291         // on the allocation.
6292         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6293                           << "\n");
6294         AI.Status = AllocationInfo::INVALID;
6295         Changed = ChangeStatus::CHANGED;
6296         continue;
6297       }
6298       if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6299           !APAlign->isPowerOf2()) {
6300         LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6301                           << "\n");
6302         AI.Status = AllocationInfo::INVALID;
6303         Changed = ChangeStatus::CHANGED;
6304         continue;
6305       }
6306     }
6307 
6308     Optional<APInt> Size = getSize(A, *this, AI);
6309     if (MaxHeapToStackSize != -1) {
6310       if (!Size || Size.value().ugt(MaxHeapToStackSize)) {
6311         LLVM_DEBUG({
6312           if (!Size)
6313             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6314           else
6315             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6316                    << MaxHeapToStackSize << "\n";
6317         });
6318 
6319         AI.Status = AllocationInfo::INVALID;
6320         Changed = ChangeStatus::CHANGED;
6321         continue;
6322       }
6323     }
6324 
6325     switch (AI.Status) {
6326     case AllocationInfo::STACK_DUE_TO_USE:
6327       if (UsesCheck(AI))
6328         break;
6329       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6330       LLVM_FALLTHROUGH;
6331     case AllocationInfo::STACK_DUE_TO_FREE:
6332       if (FreeCheck(AI))
6333         break;
6334       AI.Status = AllocationInfo::INVALID;
6335       Changed = ChangeStatus::CHANGED;
6336       break;
6337     case AllocationInfo::INVALID:
6338       llvm_unreachable("Invalid allocations should never reach this point!");
6339     };
6340 
6341     // Check if we still think we can move it into the entry block.
6342     if (AI.MoveAllocaIntoEntry &&
6343         (!Size.has_value() || IsInLoop(*AI.CB->getParent())))
6344       AI.MoveAllocaIntoEntry = false;
6345   }
6346 
6347   return Changed;
6348 }
6349 } // namespace
6350 
6351 /// ----------------------- Privatizable Pointers ------------------------------
6352 namespace {
6353 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6354   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6355       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6356 
6357   ChangeStatus indicatePessimisticFixpoint() override {
6358     AAPrivatizablePtr::indicatePessimisticFixpoint();
6359     PrivatizableType = nullptr;
6360     return ChangeStatus::CHANGED;
6361   }
6362 
6363   /// Identify the type we can chose for a private copy of the underlying
6364   /// argument. None means it is not clear yet, nullptr means there is none.
6365   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6366 
6367   /// Return a privatizable type that encloses both T0 and T1.
6368   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6369   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6370     if (!T0)
6371       return T1;
6372     if (!T1)
6373       return T0;
6374     if (T0 == T1)
6375       return T0;
6376     return nullptr;
6377   }
6378 
6379   Optional<Type *> getPrivatizableType() const override {
6380     return PrivatizableType;
6381   }
6382 
6383   const std::string getAsStr() const override {
6384     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6385   }
6386 
6387 protected:
6388   Optional<Type *> PrivatizableType;
6389 };
6390 
6391 // TODO: Do this for call site arguments (probably also other values) as well.
6392 
6393 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6394   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6395       : AAPrivatizablePtrImpl(IRP, A) {}
6396 
6397   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6398   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6399     // If this is a byval argument and we know all the call sites (so we can
6400     // rewrite them), there is no need to check them explicitly.
6401     bool UsedAssumedInformation = false;
6402     SmallVector<Attribute, 1> Attrs;
6403     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6404     if (!Attrs.empty() &&
6405         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6406                                true, UsedAssumedInformation))
6407       return Attrs[0].getValueAsType();
6408 
6409     Optional<Type *> Ty;
6410     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6411 
6412     // Make sure the associated call site argument has the same type at all call
6413     // sites and it is an allocation we know is safe to privatize, for now that
6414     // means we only allow alloca instructions.
6415     // TODO: We can additionally analyze the accesses in the callee to  create
6416     //       the type from that information instead. That is a little more
6417     //       involved and will be done in a follow up patch.
6418     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6419       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6420       // Check if a coresponding argument was found or if it is one not
6421       // associated (which can happen for callback calls).
6422       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6423         return false;
6424 
6425       // Check that all call sites agree on a type.
6426       auto &PrivCSArgAA =
6427           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6428       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6429 
6430       LLVM_DEBUG({
6431         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6432         if (CSTy && CSTy.value())
6433           CSTy.value()->print(dbgs());
6434         else if (CSTy)
6435           dbgs() << "<nullptr>";
6436         else
6437           dbgs() << "<none>";
6438       });
6439 
6440       Ty = combineTypes(Ty, CSTy);
6441 
6442       LLVM_DEBUG({
6443         dbgs() << " : New Type: ";
6444         if (Ty && Ty.value())
6445           Ty.value()->print(dbgs());
6446         else if (Ty)
6447           dbgs() << "<nullptr>";
6448         else
6449           dbgs() << "<none>";
6450         dbgs() << "\n";
6451       });
6452 
6453       return !Ty || Ty.value();
6454     };
6455 
6456     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6457                                 UsedAssumedInformation))
6458       return nullptr;
6459     return Ty;
6460   }
6461 
6462   /// See AbstractAttribute::updateImpl(...).
6463   ChangeStatus updateImpl(Attributor &A) override {
6464     PrivatizableType = identifyPrivatizableType(A);
6465     if (!PrivatizableType)
6466       return ChangeStatus::UNCHANGED;
6467     if (!PrivatizableType.value())
6468       return indicatePessimisticFixpoint();
6469 
6470     // The dependence is optional so we don't give up once we give up on the
6471     // alignment.
6472     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6473                         DepClassTy::OPTIONAL);
6474 
6475     // Avoid arguments with padding for now.
6476     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6477         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6478       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6479       return indicatePessimisticFixpoint();
6480     }
6481 
6482     // Collect the types that will replace the privatizable type in the function
6483     // signature.
6484     SmallVector<Type *, 16> ReplacementTypes;
6485     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6486 
6487     // Verify callee and caller agree on how the promoted argument would be
6488     // passed.
6489     Function &Fn = *getIRPosition().getAnchorScope();
6490     const auto *TTI =
6491         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6492     if (!TTI) {
6493       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6494                         << Fn.getName() << "\n");
6495       return indicatePessimisticFixpoint();
6496     }
6497 
6498     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6499       CallBase *CB = ACS.getInstruction();
6500       return TTI->areTypesABICompatible(
6501           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6502     };
6503     bool UsedAssumedInformation = false;
6504     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6505                                 UsedAssumedInformation)) {
6506       LLVM_DEBUG(
6507           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6508                  << Fn.getName() << "\n");
6509       return indicatePessimisticFixpoint();
6510     }
6511 
6512     // Register a rewrite of the argument.
6513     Argument *Arg = getAssociatedArgument();
6514     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6515       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6516       return indicatePessimisticFixpoint();
6517     }
6518 
6519     unsigned ArgNo = Arg->getArgNo();
6520 
6521     // Helper to check if for the given call site the associated argument is
6522     // passed to a callback where the privatization would be different.
6523     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6524       SmallVector<const Use *, 4> CallbackUses;
6525       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6526       for (const Use *U : CallbackUses) {
6527         AbstractCallSite CBACS(U);
6528         assert(CBACS && CBACS.isCallbackCall());
6529         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6530           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6531 
6532           LLVM_DEBUG({
6533             dbgs()
6534                 << "[AAPrivatizablePtr] Argument " << *Arg
6535                 << "check if can be privatized in the context of its parent ("
6536                 << Arg->getParent()->getName()
6537                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6538                    "callback ("
6539                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6540                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6541                 << CBACS.getCallArgOperand(CBArg) << " vs "
6542                 << CB.getArgOperand(ArgNo) << "\n"
6543                 << "[AAPrivatizablePtr] " << CBArg << " : "
6544                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6545           });
6546 
6547           if (CBArgNo != int(ArgNo))
6548             continue;
6549           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6550               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6551           if (CBArgPrivAA.isValidState()) {
6552             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6553             if (!CBArgPrivTy)
6554               continue;
6555             if (CBArgPrivTy.value() == PrivatizableType)
6556               continue;
6557           }
6558 
6559           LLVM_DEBUG({
6560             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6561                    << " cannot be privatized in the context of its parent ("
6562                    << Arg->getParent()->getName()
6563                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6564                       "callback ("
6565                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6566                    << ").\n[AAPrivatizablePtr] for which the argument "
6567                       "privatization is not compatible.\n";
6568           });
6569           return false;
6570         }
6571       }
6572       return true;
6573     };
6574 
6575     // Helper to check if for the given call site the associated argument is
6576     // passed to a direct call where the privatization would be different.
6577     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6578       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6579       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6580       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6581              "Expected a direct call operand for callback call operand");
6582 
6583       LLVM_DEBUG({
6584         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6585                << " check if be privatized in the context of its parent ("
6586                << Arg->getParent()->getName()
6587                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6588                   "direct call of ("
6589                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6590                << ").\n";
6591       });
6592 
6593       Function *DCCallee = DC->getCalledFunction();
6594       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6595         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6596             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6597             DepClassTy::REQUIRED);
6598         if (DCArgPrivAA.isValidState()) {
6599           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6600           if (!DCArgPrivTy)
6601             return true;
6602           if (DCArgPrivTy.value() == PrivatizableType)
6603             return true;
6604         }
6605       }
6606 
6607       LLVM_DEBUG({
6608         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6609                << " cannot be privatized in the context of its parent ("
6610                << Arg->getParent()->getName()
6611                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6612                   "direct call of ("
6613                << ACS.getInstruction()->getCalledFunction()->getName()
6614                << ").\n[AAPrivatizablePtr] for which the argument "
6615                   "privatization is not compatible.\n";
6616       });
6617       return false;
6618     };
6619 
6620     // Helper to check if the associated argument is used at the given abstract
6621     // call site in a way that is incompatible with the privatization assumed
6622     // here.
6623     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6624       if (ACS.isDirectCall())
6625         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6626       if (ACS.isCallbackCall())
6627         return IsCompatiblePrivArgOfDirectCS(ACS);
6628       return false;
6629     };
6630 
6631     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6632                                 UsedAssumedInformation))
6633       return indicatePessimisticFixpoint();
6634 
6635     return ChangeStatus::UNCHANGED;
6636   }
6637 
6638   /// Given a type to private \p PrivType, collect the constituates (which are
6639   /// used) in \p ReplacementTypes.
6640   static void
6641   identifyReplacementTypes(Type *PrivType,
6642                            SmallVectorImpl<Type *> &ReplacementTypes) {
6643     // TODO: For now we expand the privatization type to the fullest which can
6644     //       lead to dead arguments that need to be removed later.
6645     assert(PrivType && "Expected privatizable type!");
6646 
6647     // Traverse the type, extract constituate types on the outermost level.
6648     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6649       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6650         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6651     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6652       ReplacementTypes.append(PrivArrayType->getNumElements(),
6653                               PrivArrayType->getElementType());
6654     } else {
6655       ReplacementTypes.push_back(PrivType);
6656     }
6657   }
6658 
6659   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6660   /// The values needed are taken from the arguments of \p F starting at
6661   /// position \p ArgNo.
6662   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6663                                    unsigned ArgNo, Instruction &IP) {
6664     assert(PrivType && "Expected privatizable type!");
6665 
6666     IRBuilder<NoFolder> IRB(&IP);
6667     const DataLayout &DL = F.getParent()->getDataLayout();
6668 
6669     // Traverse the type, build GEPs and stores.
6670     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6671       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6672       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6673         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6674         Value *Ptr =
6675             constructPointer(PointeeTy, PrivType, &Base,
6676                              PrivStructLayout->getElementOffset(u), IRB, DL);
6677         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6678       }
6679     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6680       Type *PointeeTy = PrivArrayType->getElementType();
6681       Type *PointeePtrTy = PointeeTy->getPointerTo();
6682       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6683       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6684         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6685                                       u * PointeeTySize, IRB, DL);
6686         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6687       }
6688     } else {
6689       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6690     }
6691   }
6692 
6693   /// Extract values from \p Base according to the type \p PrivType at the
6694   /// call position \p ACS. The values are appended to \p ReplacementValues.
6695   void createReplacementValues(Align Alignment, Type *PrivType,
6696                                AbstractCallSite ACS, Value *Base,
6697                                SmallVectorImpl<Value *> &ReplacementValues) {
6698     assert(Base && "Expected base value!");
6699     assert(PrivType && "Expected privatizable type!");
6700     Instruction *IP = ACS.getInstruction();
6701 
6702     IRBuilder<NoFolder> IRB(IP);
6703     const DataLayout &DL = IP->getModule()->getDataLayout();
6704 
6705     Type *PrivPtrType = PrivType->getPointerTo();
6706     if (Base->getType() != PrivPtrType)
6707       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6708           Base, PrivPtrType, "", ACS.getInstruction());
6709 
6710     // Traverse the type, build GEPs and loads.
6711     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6712       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6713       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6714         Type *PointeeTy = PrivStructType->getElementType(u);
6715         Value *Ptr =
6716             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6717                              PrivStructLayout->getElementOffset(u), IRB, DL);
6718         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6719         L->setAlignment(Alignment);
6720         ReplacementValues.push_back(L);
6721       }
6722     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6723       Type *PointeeTy = PrivArrayType->getElementType();
6724       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6725       Type *PointeePtrTy = PointeeTy->getPointerTo();
6726       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6727         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6728                                       u * PointeeTySize, IRB, DL);
6729         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6730         L->setAlignment(Alignment);
6731         ReplacementValues.push_back(L);
6732       }
6733     } else {
6734       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6735       L->setAlignment(Alignment);
6736       ReplacementValues.push_back(L);
6737     }
6738   }
6739 
6740   /// See AbstractAttribute::manifest(...)
6741   ChangeStatus manifest(Attributor &A) override {
6742     if (!PrivatizableType)
6743       return ChangeStatus::UNCHANGED;
6744     assert(PrivatizableType.value() && "Expected privatizable type!");
6745 
6746     // Collect all tail calls in the function as we cannot allow new allocas to
6747     // escape into tail recursion.
6748     // TODO: Be smarter about new allocas escaping into tail calls.
6749     SmallVector<CallInst *, 16> TailCalls;
6750     bool UsedAssumedInformation = false;
6751     if (!A.checkForAllInstructions(
6752             [&](Instruction &I) {
6753               CallInst &CI = cast<CallInst>(I);
6754               if (CI.isTailCall())
6755                 TailCalls.push_back(&CI);
6756               return true;
6757             },
6758             *this, {Instruction::Call}, UsedAssumedInformation))
6759       return ChangeStatus::UNCHANGED;
6760 
6761     Argument *Arg = getAssociatedArgument();
6762     // Query AAAlign attribute for alignment of associated argument to
6763     // determine the best alignment of loads.
6764     const auto &AlignAA =
6765         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6766 
6767     // Callback to repair the associated function. A new alloca is placed at the
6768     // beginning and initialized with the values passed through arguments. The
6769     // new alloca replaces the use of the old pointer argument.
6770     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6771         [=](const Attributor::ArgumentReplacementInfo &ARI,
6772             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6773           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6774           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6775           const DataLayout &DL = IP->getModule()->getDataLayout();
6776           unsigned AS = DL.getAllocaAddrSpace();
6777           Instruction *AI = new AllocaInst(PrivatizableType.value(), AS,
6778                                            Arg->getName() + ".priv", IP);
6779           createInitialization(PrivatizableType.value(), *AI, ReplacementFn,
6780                                ArgIt->getArgNo(), *IP);
6781 
6782           if (AI->getType() != Arg->getType())
6783             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6784                 AI, Arg->getType(), "", IP);
6785           Arg->replaceAllUsesWith(AI);
6786 
6787           for (CallInst *CI : TailCalls)
6788             CI->setTailCall(false);
6789         };
6790 
6791     // Callback to repair a call site of the associated function. The elements
6792     // of the privatizable type are loaded prior to the call and passed to the
6793     // new function version.
6794     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6795         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6796                       AbstractCallSite ACS,
6797                       SmallVectorImpl<Value *> &NewArgOperands) {
6798           // When no alignment is specified for the load instruction,
6799           // natural alignment is assumed.
6800           createReplacementValues(
6801               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
6802               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6803               NewArgOperands);
6804         };
6805 
6806     // Collect the types that will replace the privatizable type in the function
6807     // signature.
6808     SmallVector<Type *, 16> ReplacementTypes;
6809     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6810 
6811     // Register a rewrite of the argument.
6812     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6813                                            std::move(FnRepairCB),
6814                                            std::move(ACSRepairCB)))
6815       return ChangeStatus::CHANGED;
6816     return ChangeStatus::UNCHANGED;
6817   }
6818 
6819   /// See AbstractAttribute::trackStatistics()
6820   void trackStatistics() const override {
6821     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6822   }
6823 };
6824 
6825 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6826   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6827       : AAPrivatizablePtrImpl(IRP, A) {}
6828 
6829   /// See AbstractAttribute::initialize(...).
6830   virtual void initialize(Attributor &A) override {
6831     // TODO: We can privatize more than arguments.
6832     indicatePessimisticFixpoint();
6833   }
6834 
6835   ChangeStatus updateImpl(Attributor &A) override {
6836     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6837                      "updateImpl will not be called");
6838   }
6839 
6840   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6841   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6842     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6843     if (!Obj) {
6844       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6845       return nullptr;
6846     }
6847 
6848     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6849       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6850         if (CI->isOne())
6851           return AI->getAllocatedType();
6852     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6853       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6854           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6855       if (PrivArgAA.isAssumedPrivatizablePtr())
6856         return PrivArgAA.getPrivatizableType();
6857     }
6858 
6859     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6860                          "alloca nor privatizable argument: "
6861                       << *Obj << "!\n");
6862     return nullptr;
6863   }
6864 
6865   /// See AbstractAttribute::trackStatistics()
6866   void trackStatistics() const override {
6867     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6868   }
6869 };
6870 
6871 struct AAPrivatizablePtrCallSiteArgument final
6872     : public AAPrivatizablePtrFloating {
6873   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6874       : AAPrivatizablePtrFloating(IRP, A) {}
6875 
6876   /// See AbstractAttribute::initialize(...).
6877   void initialize(Attributor &A) override {
6878     if (getIRPosition().hasAttr(Attribute::ByVal))
6879       indicateOptimisticFixpoint();
6880   }
6881 
6882   /// See AbstractAttribute::updateImpl(...).
6883   ChangeStatus updateImpl(Attributor &A) override {
6884     PrivatizableType = identifyPrivatizableType(A);
6885     if (!PrivatizableType)
6886       return ChangeStatus::UNCHANGED;
6887     if (!PrivatizableType.value())
6888       return indicatePessimisticFixpoint();
6889 
6890     const IRPosition &IRP = getIRPosition();
6891     auto &NoCaptureAA =
6892         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6893     if (!NoCaptureAA.isAssumedNoCapture()) {
6894       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6895       return indicatePessimisticFixpoint();
6896     }
6897 
6898     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6899     if (!NoAliasAA.isAssumedNoAlias()) {
6900       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6901       return indicatePessimisticFixpoint();
6902     }
6903 
6904     bool IsKnown;
6905     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
6906       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6907       return indicatePessimisticFixpoint();
6908     }
6909 
6910     return ChangeStatus::UNCHANGED;
6911   }
6912 
6913   /// See AbstractAttribute::trackStatistics()
6914   void trackStatistics() const override {
6915     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6916   }
6917 };
6918 
6919 struct AAPrivatizablePtrCallSiteReturned final
6920     : public AAPrivatizablePtrFloating {
6921   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6922       : AAPrivatizablePtrFloating(IRP, A) {}
6923 
6924   /// See AbstractAttribute::initialize(...).
6925   void initialize(Attributor &A) override {
6926     // TODO: We can privatize more than arguments.
6927     indicatePessimisticFixpoint();
6928   }
6929 
6930   /// See AbstractAttribute::trackStatistics()
6931   void trackStatistics() const override {
6932     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6933   }
6934 };
6935 
6936 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6937   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6938       : AAPrivatizablePtrFloating(IRP, A) {}
6939 
6940   /// See AbstractAttribute::initialize(...).
6941   void initialize(Attributor &A) override {
6942     // TODO: We can privatize more than arguments.
6943     indicatePessimisticFixpoint();
6944   }
6945 
6946   /// See AbstractAttribute::trackStatistics()
6947   void trackStatistics() const override {
6948     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6949   }
6950 };
6951 } // namespace
6952 
6953 /// -------------------- Memory Behavior Attributes ----------------------------
6954 /// Includes read-none, read-only, and write-only.
6955 /// ----------------------------------------------------------------------------
6956 namespace {
6957 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6958   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6959       : AAMemoryBehavior(IRP, A) {}
6960 
6961   /// See AbstractAttribute::initialize(...).
6962   void initialize(Attributor &A) override {
6963     intersectAssumedBits(BEST_STATE);
6964     getKnownStateFromValue(getIRPosition(), getState());
6965     AAMemoryBehavior::initialize(A);
6966   }
6967 
6968   /// Return the memory behavior information encoded in the IR for \p IRP.
6969   static void getKnownStateFromValue(const IRPosition &IRP,
6970                                      BitIntegerState &State,
6971                                      bool IgnoreSubsumingPositions = false) {
6972     SmallVector<Attribute, 2> Attrs;
6973     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6974     for (const Attribute &Attr : Attrs) {
6975       switch (Attr.getKindAsEnum()) {
6976       case Attribute::ReadNone:
6977         State.addKnownBits(NO_ACCESSES);
6978         break;
6979       case Attribute::ReadOnly:
6980         State.addKnownBits(NO_WRITES);
6981         break;
6982       case Attribute::WriteOnly:
6983         State.addKnownBits(NO_READS);
6984         break;
6985       default:
6986         llvm_unreachable("Unexpected attribute!");
6987       }
6988     }
6989 
6990     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6991       if (!I->mayReadFromMemory())
6992         State.addKnownBits(NO_READS);
6993       if (!I->mayWriteToMemory())
6994         State.addKnownBits(NO_WRITES);
6995     }
6996   }
6997 
6998   /// See AbstractAttribute::getDeducedAttributes(...).
6999   void getDeducedAttributes(LLVMContext &Ctx,
7000                             SmallVectorImpl<Attribute> &Attrs) const override {
7001     assert(Attrs.size() == 0);
7002     if (isAssumedReadNone())
7003       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7004     else if (isAssumedReadOnly())
7005       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
7006     else if (isAssumedWriteOnly())
7007       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
7008     assert(Attrs.size() <= 1);
7009   }
7010 
7011   /// See AbstractAttribute::manifest(...).
7012   ChangeStatus manifest(Attributor &A) override {
7013     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
7014       return ChangeStatus::UNCHANGED;
7015 
7016     const IRPosition &IRP = getIRPosition();
7017 
7018     // Check if we would improve the existing attributes first.
7019     SmallVector<Attribute, 4> DeducedAttrs;
7020     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7021     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7022           return IRP.hasAttr(Attr.getKindAsEnum(),
7023                              /* IgnoreSubsumingPositions */ true);
7024         }))
7025       return ChangeStatus::UNCHANGED;
7026 
7027     // Clear existing attributes.
7028     IRP.removeAttrs(AttrKinds);
7029 
7030     // Use the generic manifest method.
7031     return IRAttribute::manifest(A);
7032   }
7033 
7034   /// See AbstractState::getAsStr().
7035   const std::string getAsStr() const override {
7036     if (isAssumedReadNone())
7037       return "readnone";
7038     if (isAssumedReadOnly())
7039       return "readonly";
7040     if (isAssumedWriteOnly())
7041       return "writeonly";
7042     return "may-read/write";
7043   }
7044 
7045   /// The set of IR attributes AAMemoryBehavior deals with.
7046   static const Attribute::AttrKind AttrKinds[3];
7047 };
7048 
7049 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7050     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7051 
7052 /// Memory behavior attribute for a floating value.
7053 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7054   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7055       : AAMemoryBehaviorImpl(IRP, A) {}
7056 
7057   /// See AbstractAttribute::updateImpl(...).
7058   ChangeStatus updateImpl(Attributor &A) override;
7059 
7060   /// See AbstractAttribute::trackStatistics()
7061   void trackStatistics() const override {
7062     if (isAssumedReadNone())
7063       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7064     else if (isAssumedReadOnly())
7065       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7066     else if (isAssumedWriteOnly())
7067       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7068   }
7069 
7070 private:
7071   /// Return true if users of \p UserI might access the underlying
7072   /// variable/location described by \p U and should therefore be analyzed.
7073   bool followUsersOfUseIn(Attributor &A, const Use &U,
7074                           const Instruction *UserI);
7075 
7076   /// Update the state according to the effect of use \p U in \p UserI.
7077   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7078 };
7079 
7080 /// Memory behavior attribute for function argument.
7081 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7082   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7083       : AAMemoryBehaviorFloating(IRP, A) {}
7084 
7085   /// See AbstractAttribute::initialize(...).
7086   void initialize(Attributor &A) override {
7087     intersectAssumedBits(BEST_STATE);
7088     const IRPosition &IRP = getIRPosition();
7089     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7090     // can query it when we use has/getAttr. That would allow us to reuse the
7091     // initialize of the base class here.
7092     bool HasByVal =
7093         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7094     getKnownStateFromValue(IRP, getState(),
7095                            /* IgnoreSubsumingPositions */ HasByVal);
7096 
7097     // Initialize the use vector with all direct uses of the associated value.
7098     Argument *Arg = getAssociatedArgument();
7099     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7100       indicatePessimisticFixpoint();
7101   }
7102 
7103   ChangeStatus manifest(Attributor &A) override {
7104     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7105     if (!getAssociatedValue().getType()->isPointerTy())
7106       return ChangeStatus::UNCHANGED;
7107 
7108     // TODO: From readattrs.ll: "inalloca parameters are always
7109     //                           considered written"
7110     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7111       removeKnownBits(NO_WRITES);
7112       removeAssumedBits(NO_WRITES);
7113     }
7114     return AAMemoryBehaviorFloating::manifest(A);
7115   }
7116 
7117   /// See AbstractAttribute::trackStatistics()
7118   void trackStatistics() const override {
7119     if (isAssumedReadNone())
7120       STATS_DECLTRACK_ARG_ATTR(readnone)
7121     else if (isAssumedReadOnly())
7122       STATS_DECLTRACK_ARG_ATTR(readonly)
7123     else if (isAssumedWriteOnly())
7124       STATS_DECLTRACK_ARG_ATTR(writeonly)
7125   }
7126 };
7127 
7128 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7129   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7130       : AAMemoryBehaviorArgument(IRP, A) {}
7131 
7132   /// See AbstractAttribute::initialize(...).
7133   void initialize(Attributor &A) override {
7134     // If we don't have an associated attribute this is either a variadic call
7135     // or an indirect call, either way, nothing to do here.
7136     Argument *Arg = getAssociatedArgument();
7137     if (!Arg) {
7138       indicatePessimisticFixpoint();
7139       return;
7140     }
7141     if (Arg->hasByValAttr()) {
7142       addKnownBits(NO_WRITES);
7143       removeKnownBits(NO_READS);
7144       removeAssumedBits(NO_READS);
7145     }
7146     AAMemoryBehaviorArgument::initialize(A);
7147     if (getAssociatedFunction()->isDeclaration())
7148       indicatePessimisticFixpoint();
7149   }
7150 
7151   /// See AbstractAttribute::updateImpl(...).
7152   ChangeStatus updateImpl(Attributor &A) override {
7153     // TODO: Once we have call site specific value information we can provide
7154     //       call site specific liveness liveness information and then it makes
7155     //       sense to specialize attributes for call sites arguments instead of
7156     //       redirecting requests to the callee argument.
7157     Argument *Arg = getAssociatedArgument();
7158     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7159     auto &ArgAA =
7160         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7161     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7162   }
7163 
7164   /// See AbstractAttribute::trackStatistics()
7165   void trackStatistics() const override {
7166     if (isAssumedReadNone())
7167       STATS_DECLTRACK_CSARG_ATTR(readnone)
7168     else if (isAssumedReadOnly())
7169       STATS_DECLTRACK_CSARG_ATTR(readonly)
7170     else if (isAssumedWriteOnly())
7171       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7172   }
7173 };
7174 
7175 /// Memory behavior attribute for a call site return position.
7176 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7177   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7178       : AAMemoryBehaviorFloating(IRP, A) {}
7179 
7180   /// See AbstractAttribute::initialize(...).
7181   void initialize(Attributor &A) override {
7182     AAMemoryBehaviorImpl::initialize(A);
7183     Function *F = getAssociatedFunction();
7184     if (!F || F->isDeclaration())
7185       indicatePessimisticFixpoint();
7186   }
7187 
7188   /// See AbstractAttribute::manifest(...).
7189   ChangeStatus manifest(Attributor &A) override {
7190     // We do not annotate returned values.
7191     return ChangeStatus::UNCHANGED;
7192   }
7193 
7194   /// See AbstractAttribute::trackStatistics()
7195   void trackStatistics() const override {}
7196 };
7197 
7198 /// An AA to represent the memory behavior function attributes.
7199 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7200   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7201       : AAMemoryBehaviorImpl(IRP, A) {}
7202 
7203   /// See AbstractAttribute::updateImpl(Attributor &A).
7204   virtual ChangeStatus updateImpl(Attributor &A) override;
7205 
7206   /// See AbstractAttribute::manifest(...).
7207   ChangeStatus manifest(Attributor &A) override {
7208     Function &F = cast<Function>(getAnchorValue());
7209     if (isAssumedReadNone()) {
7210       F.removeFnAttr(Attribute::ArgMemOnly);
7211       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7212       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7213     }
7214     return AAMemoryBehaviorImpl::manifest(A);
7215   }
7216 
7217   /// See AbstractAttribute::trackStatistics()
7218   void trackStatistics() const override {
7219     if (isAssumedReadNone())
7220       STATS_DECLTRACK_FN_ATTR(readnone)
7221     else if (isAssumedReadOnly())
7222       STATS_DECLTRACK_FN_ATTR(readonly)
7223     else if (isAssumedWriteOnly())
7224       STATS_DECLTRACK_FN_ATTR(writeonly)
7225   }
7226 };
7227 
7228 /// AAMemoryBehavior attribute for call sites.
7229 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7230   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7231       : AAMemoryBehaviorImpl(IRP, A) {}
7232 
7233   /// See AbstractAttribute::initialize(...).
7234   void initialize(Attributor &A) override {
7235     AAMemoryBehaviorImpl::initialize(A);
7236     Function *F = getAssociatedFunction();
7237     if (!F || F->isDeclaration())
7238       indicatePessimisticFixpoint();
7239   }
7240 
7241   /// See AbstractAttribute::updateImpl(...).
7242   ChangeStatus updateImpl(Attributor &A) override {
7243     // TODO: Once we have call site specific value information we can provide
7244     //       call site specific liveness liveness information and then it makes
7245     //       sense to specialize attributes for call sites arguments instead of
7246     //       redirecting requests to the callee argument.
7247     Function *F = getAssociatedFunction();
7248     const IRPosition &FnPos = IRPosition::function(*F);
7249     auto &FnAA =
7250         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7251     return clampStateAndIndicateChange(getState(), FnAA.getState());
7252   }
7253 
7254   /// See AbstractAttribute::trackStatistics()
7255   void trackStatistics() const override {
7256     if (isAssumedReadNone())
7257       STATS_DECLTRACK_CS_ATTR(readnone)
7258     else if (isAssumedReadOnly())
7259       STATS_DECLTRACK_CS_ATTR(readonly)
7260     else if (isAssumedWriteOnly())
7261       STATS_DECLTRACK_CS_ATTR(writeonly)
7262   }
7263 };
7264 
7265 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7266 
7267   // The current assumed state used to determine a change.
7268   auto AssumedState = getAssumed();
7269 
7270   auto CheckRWInst = [&](Instruction &I) {
7271     // If the instruction has an own memory behavior state, use it to restrict
7272     // the local state. No further analysis is required as the other memory
7273     // state is as optimistic as it gets.
7274     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7275       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7276           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7277       intersectAssumedBits(MemBehaviorAA.getAssumed());
7278       return !isAtFixpoint();
7279     }
7280 
7281     // Remove access kind modifiers if necessary.
7282     if (I.mayReadFromMemory())
7283       removeAssumedBits(NO_READS);
7284     if (I.mayWriteToMemory())
7285       removeAssumedBits(NO_WRITES);
7286     return !isAtFixpoint();
7287   };
7288 
7289   bool UsedAssumedInformation = false;
7290   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7291                                           UsedAssumedInformation))
7292     return indicatePessimisticFixpoint();
7293 
7294   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7295                                         : ChangeStatus::UNCHANGED;
7296 }
7297 
7298 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7299 
7300   const IRPosition &IRP = getIRPosition();
7301   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7302   AAMemoryBehavior::StateType &S = getState();
7303 
7304   // First, check the function scope. We take the known information and we avoid
7305   // work if the assumed information implies the current assumed information for
7306   // this attribute. This is a valid for all but byval arguments.
7307   Argument *Arg = IRP.getAssociatedArgument();
7308   AAMemoryBehavior::base_t FnMemAssumedState =
7309       AAMemoryBehavior::StateType::getWorstState();
7310   if (!Arg || !Arg->hasByValAttr()) {
7311     const auto &FnMemAA =
7312         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7313     FnMemAssumedState = FnMemAA.getAssumed();
7314     S.addKnownBits(FnMemAA.getKnown());
7315     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7316       return ChangeStatus::UNCHANGED;
7317   }
7318 
7319   // The current assumed state used to determine a change.
7320   auto AssumedState = S.getAssumed();
7321 
7322   // Make sure the value is not captured (except through "return"), if
7323   // it is, any information derived would be irrelevant anyway as we cannot
7324   // check the potential aliases introduced by the capture. However, no need
7325   // to fall back to anythign less optimistic than the function state.
7326   const auto &ArgNoCaptureAA =
7327       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7328   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7329     S.intersectAssumedBits(FnMemAssumedState);
7330     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7331                                           : ChangeStatus::UNCHANGED;
7332   }
7333 
7334   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7335   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7336     Instruction *UserI = cast<Instruction>(U.getUser());
7337     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7338                       << " \n");
7339 
7340     // Droppable users, e.g., llvm::assume does not actually perform any action.
7341     if (UserI->isDroppable())
7342       return true;
7343 
7344     // Check if the users of UserI should also be visited.
7345     Follow = followUsersOfUseIn(A, U, UserI);
7346 
7347     // If UserI might touch memory we analyze the use in detail.
7348     if (UserI->mayReadOrWriteMemory())
7349       analyzeUseIn(A, U, UserI);
7350 
7351     return !isAtFixpoint();
7352   };
7353 
7354   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7355     return indicatePessimisticFixpoint();
7356 
7357   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7358                                         : ChangeStatus::UNCHANGED;
7359 }
7360 
7361 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7362                                                   const Instruction *UserI) {
7363   // The loaded value is unrelated to the pointer argument, no need to
7364   // follow the users of the load.
7365   if (isa<LoadInst>(UserI) || isa<ReturnInst>(UserI))
7366     return false;
7367 
7368   // By default we follow all uses assuming UserI might leak information on U,
7369   // we have special handling for call sites operands though.
7370   const auto *CB = dyn_cast<CallBase>(UserI);
7371   if (!CB || !CB->isArgOperand(&U))
7372     return true;
7373 
7374   // If the use is a call argument known not to be captured, the users of
7375   // the call do not need to be visited because they have to be unrelated to
7376   // the input. Note that this check is not trivial even though we disallow
7377   // general capturing of the underlying argument. The reason is that the
7378   // call might the argument "through return", which we allow and for which we
7379   // need to check call users.
7380   if (U.get()->getType()->isPointerTy()) {
7381     unsigned ArgNo = CB->getArgOperandNo(&U);
7382     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7383         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7384     return !ArgNoCaptureAA.isAssumedNoCapture();
7385   }
7386 
7387   return true;
7388 }
7389 
7390 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7391                                             const Instruction *UserI) {
7392   assert(UserI->mayReadOrWriteMemory());
7393 
7394   switch (UserI->getOpcode()) {
7395   default:
7396     // TODO: Handle all atomics and other side-effect operations we know of.
7397     break;
7398   case Instruction::Load:
7399     // Loads cause the NO_READS property to disappear.
7400     removeAssumedBits(NO_READS);
7401     return;
7402 
7403   case Instruction::Store:
7404     // Stores cause the NO_WRITES property to disappear if the use is the
7405     // pointer operand. Note that while capturing was taken care of somewhere
7406     // else we need to deal with stores of the value that is not looked through.
7407     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7408       removeAssumedBits(NO_WRITES);
7409     else
7410       indicatePessimisticFixpoint();
7411     return;
7412 
7413   case Instruction::Call:
7414   case Instruction::CallBr:
7415   case Instruction::Invoke: {
7416     // For call sites we look at the argument memory behavior attribute (this
7417     // could be recursive!) in order to restrict our own state.
7418     const auto *CB = cast<CallBase>(UserI);
7419 
7420     // Give up on operand bundles.
7421     if (CB->isBundleOperand(&U)) {
7422       indicatePessimisticFixpoint();
7423       return;
7424     }
7425 
7426     // Calling a function does read the function pointer, maybe write it if the
7427     // function is self-modifying.
7428     if (CB->isCallee(&U)) {
7429       removeAssumedBits(NO_READS);
7430       break;
7431     }
7432 
7433     // Adjust the possible access behavior based on the information on the
7434     // argument.
7435     IRPosition Pos;
7436     if (U.get()->getType()->isPointerTy())
7437       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7438     else
7439       Pos = IRPosition::callsite_function(*CB);
7440     const auto &MemBehaviorAA =
7441         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7442     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7443     // and at least "known".
7444     intersectAssumedBits(MemBehaviorAA.getAssumed());
7445     return;
7446   }
7447   };
7448 
7449   // Generally, look at the "may-properties" and adjust the assumed state if we
7450   // did not trigger special handling before.
7451   if (UserI->mayReadFromMemory())
7452     removeAssumedBits(NO_READS);
7453   if (UserI->mayWriteToMemory())
7454     removeAssumedBits(NO_WRITES);
7455 }
7456 } // namespace
7457 
7458 /// -------------------- Memory Locations Attributes ---------------------------
7459 /// Includes read-none, argmemonly, inaccessiblememonly,
7460 /// inaccessiblememorargmemonly
7461 /// ----------------------------------------------------------------------------
7462 
7463 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7464     AAMemoryLocation::MemoryLocationsKind MLK) {
7465   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7466     return "all memory";
7467   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7468     return "no memory";
7469   std::string S = "memory:";
7470   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7471     S += "stack,";
7472   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7473     S += "constant,";
7474   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7475     S += "internal global,";
7476   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7477     S += "external global,";
7478   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7479     S += "argument,";
7480   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7481     S += "inaccessible,";
7482   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7483     S += "malloced,";
7484   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7485     S += "unknown,";
7486   S.pop_back();
7487   return S;
7488 }
7489 
7490 namespace {
7491 struct AAMemoryLocationImpl : public AAMemoryLocation {
7492 
7493   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7494       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7495     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7496       AccessKind2Accesses[u] = nullptr;
7497   }
7498 
7499   ~AAMemoryLocationImpl() {
7500     // The AccessSets are allocated via a BumpPtrAllocator, we call
7501     // the destructor manually.
7502     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7503       if (AccessKind2Accesses[u])
7504         AccessKind2Accesses[u]->~AccessSet();
7505   }
7506 
7507   /// See AbstractAttribute::initialize(...).
7508   void initialize(Attributor &A) override {
7509     intersectAssumedBits(BEST_STATE);
7510     getKnownStateFromValue(A, getIRPosition(), getState());
7511     AAMemoryLocation::initialize(A);
7512   }
7513 
7514   /// Return the memory behavior information encoded in the IR for \p IRP.
7515   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7516                                      BitIntegerState &State,
7517                                      bool IgnoreSubsumingPositions = false) {
7518     // For internal functions we ignore `argmemonly` and
7519     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7520     // constant propagation. It is unclear if this is the best way but it is
7521     // unlikely this will cause real performance problems. If we are deriving
7522     // attributes for the anchor function we even remove the attribute in
7523     // addition to ignoring it.
7524     bool UseArgMemOnly = true;
7525     Function *AnchorFn = IRP.getAnchorScope();
7526     if (AnchorFn && A.isRunOn(*AnchorFn))
7527       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7528 
7529     SmallVector<Attribute, 2> Attrs;
7530     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7531     for (const Attribute &Attr : Attrs) {
7532       switch (Attr.getKindAsEnum()) {
7533       case Attribute::ReadNone:
7534         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7535         break;
7536       case Attribute::InaccessibleMemOnly:
7537         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7538         break;
7539       case Attribute::ArgMemOnly:
7540         if (UseArgMemOnly)
7541           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7542         else
7543           IRP.removeAttrs({Attribute::ArgMemOnly});
7544         break;
7545       case Attribute::InaccessibleMemOrArgMemOnly:
7546         if (UseArgMemOnly)
7547           State.addKnownBits(inverseLocation(
7548               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7549         else
7550           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7551         break;
7552       default:
7553         llvm_unreachable("Unexpected attribute!");
7554       }
7555     }
7556   }
7557 
7558   /// See AbstractAttribute::getDeducedAttributes(...).
7559   void getDeducedAttributes(LLVMContext &Ctx,
7560                             SmallVectorImpl<Attribute> &Attrs) const override {
7561     assert(Attrs.size() == 0);
7562     if (isAssumedReadNone()) {
7563       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7564     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7565       if (isAssumedInaccessibleMemOnly())
7566         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7567       else if (isAssumedArgMemOnly())
7568         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7569       else if (isAssumedInaccessibleOrArgMemOnly())
7570         Attrs.push_back(
7571             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7572     }
7573     assert(Attrs.size() <= 1);
7574   }
7575 
7576   /// See AbstractAttribute::manifest(...).
7577   ChangeStatus manifest(Attributor &A) override {
7578     const IRPosition &IRP = getIRPosition();
7579 
7580     // Check if we would improve the existing attributes first.
7581     SmallVector<Attribute, 4> DeducedAttrs;
7582     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7583     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7584           return IRP.hasAttr(Attr.getKindAsEnum(),
7585                              /* IgnoreSubsumingPositions */ true);
7586         }))
7587       return ChangeStatus::UNCHANGED;
7588 
7589     // Clear existing attributes.
7590     IRP.removeAttrs(AttrKinds);
7591     if (isAssumedReadNone())
7592       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7593 
7594     // Use the generic manifest method.
7595     return IRAttribute::manifest(A);
7596   }
7597 
7598   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7599   bool checkForAllAccessesToMemoryKind(
7600       function_ref<bool(const Instruction *, const Value *, AccessKind,
7601                         MemoryLocationsKind)>
7602           Pred,
7603       MemoryLocationsKind RequestedMLK) const override {
7604     if (!isValidState())
7605       return false;
7606 
7607     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7608     if (AssumedMLK == NO_LOCATIONS)
7609       return true;
7610 
7611     unsigned Idx = 0;
7612     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7613          CurMLK *= 2, ++Idx) {
7614       if (CurMLK & RequestedMLK)
7615         continue;
7616 
7617       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7618         for (const AccessInfo &AI : *Accesses)
7619           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7620             return false;
7621     }
7622 
7623     return true;
7624   }
7625 
7626   ChangeStatus indicatePessimisticFixpoint() override {
7627     // If we give up and indicate a pessimistic fixpoint this instruction will
7628     // become an access for all potential access kinds:
7629     // TODO: Add pointers for argmemonly and globals to improve the results of
7630     //       checkForAllAccessesToMemoryKind.
7631     bool Changed = false;
7632     MemoryLocationsKind KnownMLK = getKnown();
7633     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7634     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7635       if (!(CurMLK & KnownMLK))
7636         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7637                                   getAccessKindFromInst(I));
7638     return AAMemoryLocation::indicatePessimisticFixpoint();
7639   }
7640 
7641 protected:
7642   /// Helper struct to tie together an instruction that has a read or write
7643   /// effect with the pointer it accesses (if any).
7644   struct AccessInfo {
7645 
7646     /// The instruction that caused the access.
7647     const Instruction *I;
7648 
7649     /// The base pointer that is accessed, or null if unknown.
7650     const Value *Ptr;
7651 
7652     /// The kind of access (read/write/read+write).
7653     AccessKind Kind;
7654 
7655     bool operator==(const AccessInfo &RHS) const {
7656       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7657     }
7658     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7659       if (LHS.I != RHS.I)
7660         return LHS.I < RHS.I;
7661       if (LHS.Ptr != RHS.Ptr)
7662         return LHS.Ptr < RHS.Ptr;
7663       if (LHS.Kind != RHS.Kind)
7664         return LHS.Kind < RHS.Kind;
7665       return false;
7666     }
7667   };
7668 
7669   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7670   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7671   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7672   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7673 
7674   /// Categorize the pointer arguments of CB that might access memory in
7675   /// AccessedLoc and update the state and access map accordingly.
7676   void
7677   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7678                                      AAMemoryLocation::StateType &AccessedLocs,
7679                                      bool &Changed);
7680 
7681   /// Return the kind(s) of location that may be accessed by \p V.
7682   AAMemoryLocation::MemoryLocationsKind
7683   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7684 
7685   /// Return the access kind as determined by \p I.
7686   AccessKind getAccessKindFromInst(const Instruction *I) {
7687     AccessKind AK = READ_WRITE;
7688     if (I) {
7689       AK = I->mayReadFromMemory() ? READ : NONE;
7690       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7691     }
7692     return AK;
7693   }
7694 
7695   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7696   /// an access of kind \p AK to a \p MLK memory location with the access
7697   /// pointer \p Ptr.
7698   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7699                                  MemoryLocationsKind MLK, const Instruction *I,
7700                                  const Value *Ptr, bool &Changed,
7701                                  AccessKind AK = READ_WRITE) {
7702 
7703     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7704     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7705     if (!Accesses)
7706       Accesses = new (Allocator) AccessSet();
7707     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7708     State.removeAssumedBits(MLK);
7709   }
7710 
7711   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7712   /// arguments, and update the state and access map accordingly.
7713   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7714                           AAMemoryLocation::StateType &State, bool &Changed);
7715 
7716   /// Used to allocate access sets.
7717   BumpPtrAllocator &Allocator;
7718 
7719   /// The set of IR attributes AAMemoryLocation deals with.
7720   static const Attribute::AttrKind AttrKinds[4];
7721 };
7722 
7723 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7724     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7725     Attribute::InaccessibleMemOrArgMemOnly};
7726 
7727 void AAMemoryLocationImpl::categorizePtrValue(
7728     Attributor &A, const Instruction &I, const Value &Ptr,
7729     AAMemoryLocation::StateType &State, bool &Changed) {
7730   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7731                     << Ptr << " ["
7732                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7733 
7734   SmallSetVector<Value *, 8> Objects;
7735   bool UsedAssumedInformation = false;
7736   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7737                                        UsedAssumedInformation,
7738                                        AA::Intraprocedural)) {
7739     LLVM_DEBUG(
7740         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7741     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7742                               getAccessKindFromInst(&I));
7743     return;
7744   }
7745 
7746   for (Value *Obj : Objects) {
7747     // TODO: recognize the TBAA used for constant accesses.
7748     MemoryLocationsKind MLK = NO_LOCATIONS;
7749     if (isa<UndefValue>(Obj))
7750       continue;
7751     if (isa<Argument>(Obj)) {
7752       // TODO: For now we do not treat byval arguments as local copies performed
7753       // on the call edge, though, we should. To make that happen we need to
7754       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7755       // would also allow us to mark functions only accessing byval arguments as
7756       // readnone again, atguably their acceses have no effect outside of the
7757       // function, like accesses to allocas.
7758       MLK = NO_ARGUMENT_MEM;
7759     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7760       // Reading constant memory is not treated as a read "effect" by the
7761       // function attr pass so we won't neither. Constants defined by TBAA are
7762       // similar. (We know we do not write it because it is constant.)
7763       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7764         if (GVar->isConstant())
7765           continue;
7766 
7767       if (GV->hasLocalLinkage())
7768         MLK = NO_GLOBAL_INTERNAL_MEM;
7769       else
7770         MLK = NO_GLOBAL_EXTERNAL_MEM;
7771     } else if (isa<ConstantPointerNull>(Obj) &&
7772                !NullPointerIsDefined(getAssociatedFunction(),
7773                                      Ptr.getType()->getPointerAddressSpace())) {
7774       continue;
7775     } else if (isa<AllocaInst>(Obj)) {
7776       MLK = NO_LOCAL_MEM;
7777     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7778       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7779           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7780       if (NoAliasAA.isAssumedNoAlias())
7781         MLK = NO_MALLOCED_MEM;
7782       else
7783         MLK = NO_UNKOWN_MEM;
7784     } else {
7785       MLK = NO_UNKOWN_MEM;
7786     }
7787 
7788     assert(MLK != NO_LOCATIONS && "No location specified!");
7789     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7790                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7791                       << "\n");
7792     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7793                               getAccessKindFromInst(&I));
7794   }
7795 
7796   LLVM_DEBUG(
7797       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7798              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7799 }
7800 
7801 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7802     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7803     bool &Changed) {
7804   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7805 
7806     // Skip non-pointer arguments.
7807     const Value *ArgOp = CB.getArgOperand(ArgNo);
7808     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7809       continue;
7810 
7811     // Skip readnone arguments.
7812     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7813     const auto &ArgOpMemLocationAA =
7814         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7815 
7816     if (ArgOpMemLocationAA.isAssumedReadNone())
7817       continue;
7818 
7819     // Categorize potentially accessed pointer arguments as if there was an
7820     // access instruction with them as pointer.
7821     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7822   }
7823 }
7824 
7825 AAMemoryLocation::MemoryLocationsKind
7826 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7827                                                   bool &Changed) {
7828   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7829                     << I << "\n");
7830 
7831   AAMemoryLocation::StateType AccessedLocs;
7832   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7833 
7834   if (auto *CB = dyn_cast<CallBase>(&I)) {
7835 
7836     // First check if we assume any memory is access is visible.
7837     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7838         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7839     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7840                       << " [" << CBMemLocationAA << "]\n");
7841 
7842     if (CBMemLocationAA.isAssumedReadNone())
7843       return NO_LOCATIONS;
7844 
7845     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7846       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7847                                 Changed, getAccessKindFromInst(&I));
7848       return AccessedLocs.getAssumed();
7849     }
7850 
7851     uint32_t CBAssumedNotAccessedLocs =
7852         CBMemLocationAA.getAssumedNotAccessedLocation();
7853 
7854     // Set the argmemonly and global bit as we handle them separately below.
7855     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7856         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7857 
7858     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7859       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7860         continue;
7861       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7862                                 getAccessKindFromInst(&I));
7863     }
7864 
7865     // Now handle global memory if it might be accessed. This is slightly tricky
7866     // as NO_GLOBAL_MEM has multiple bits set.
7867     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7868     if (HasGlobalAccesses) {
7869       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7870                             AccessKind Kind, MemoryLocationsKind MLK) {
7871         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7872                                   getAccessKindFromInst(&I));
7873         return true;
7874       };
7875       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7876               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7877         return AccessedLocs.getWorstState();
7878     }
7879 
7880     LLVM_DEBUG(
7881         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7882                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7883 
7884     // Now handle argument memory if it might be accessed.
7885     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7886     if (HasArgAccesses)
7887       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7888 
7889     LLVM_DEBUG(
7890         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7891                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7892 
7893     return AccessedLocs.getAssumed();
7894   }
7895 
7896   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7897     LLVM_DEBUG(
7898         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7899                << I << " [" << *Ptr << "]\n");
7900     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7901     return AccessedLocs.getAssumed();
7902   }
7903 
7904   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7905                     << I << "\n");
7906   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7907                             getAccessKindFromInst(&I));
7908   return AccessedLocs.getAssumed();
7909 }
7910 
7911 /// An AA to represent the memory behavior function attributes.
7912 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7913   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7914       : AAMemoryLocationImpl(IRP, A) {}
7915 
7916   /// See AbstractAttribute::updateImpl(Attributor &A).
7917   virtual ChangeStatus updateImpl(Attributor &A) override {
7918 
7919     const auto &MemBehaviorAA =
7920         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7921     if (MemBehaviorAA.isAssumedReadNone()) {
7922       if (MemBehaviorAA.isKnownReadNone())
7923         return indicateOptimisticFixpoint();
7924       assert(isAssumedReadNone() &&
7925              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7926       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7927       return ChangeStatus::UNCHANGED;
7928     }
7929 
7930     // The current assumed state used to determine a change.
7931     auto AssumedState = getAssumed();
7932     bool Changed = false;
7933 
7934     auto CheckRWInst = [&](Instruction &I) {
7935       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7936       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7937                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7938       removeAssumedBits(inverseLocation(MLK, false, false));
7939       // Stop once only the valid bit set in the *not assumed location*, thus
7940       // once we don't actually exclude any memory locations in the state.
7941       return getAssumedNotAccessedLocation() != VALID_STATE;
7942     };
7943 
7944     bool UsedAssumedInformation = false;
7945     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7946                                             UsedAssumedInformation))
7947       return indicatePessimisticFixpoint();
7948 
7949     Changed |= AssumedState != getAssumed();
7950     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7951   }
7952 
7953   /// See AbstractAttribute::trackStatistics()
7954   void trackStatistics() const override {
7955     if (isAssumedReadNone())
7956       STATS_DECLTRACK_FN_ATTR(readnone)
7957     else if (isAssumedArgMemOnly())
7958       STATS_DECLTRACK_FN_ATTR(argmemonly)
7959     else if (isAssumedInaccessibleMemOnly())
7960       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7961     else if (isAssumedInaccessibleOrArgMemOnly())
7962       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7963   }
7964 };
7965 
7966 /// AAMemoryLocation attribute for call sites.
7967 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7968   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7969       : AAMemoryLocationImpl(IRP, A) {}
7970 
7971   /// See AbstractAttribute::initialize(...).
7972   void initialize(Attributor &A) override {
7973     AAMemoryLocationImpl::initialize(A);
7974     Function *F = getAssociatedFunction();
7975     if (!F || F->isDeclaration())
7976       indicatePessimisticFixpoint();
7977   }
7978 
7979   /// See AbstractAttribute::updateImpl(...).
7980   ChangeStatus updateImpl(Attributor &A) override {
7981     // TODO: Once we have call site specific value information we can provide
7982     //       call site specific liveness liveness information and then it makes
7983     //       sense to specialize attributes for call sites arguments instead of
7984     //       redirecting requests to the callee argument.
7985     Function *F = getAssociatedFunction();
7986     const IRPosition &FnPos = IRPosition::function(*F);
7987     auto &FnAA =
7988         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7989     bool Changed = false;
7990     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7991                           AccessKind Kind, MemoryLocationsKind MLK) {
7992       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7993                                 getAccessKindFromInst(I));
7994       return true;
7995     };
7996     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7997       return indicatePessimisticFixpoint();
7998     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7999   }
8000 
8001   /// See AbstractAttribute::trackStatistics()
8002   void trackStatistics() const override {
8003     if (isAssumedReadNone())
8004       STATS_DECLTRACK_CS_ATTR(readnone)
8005   }
8006 };
8007 } // namespace
8008 
8009 /// ------------------ Value Constant Range Attribute -------------------------
8010 
8011 namespace {
8012 struct AAValueConstantRangeImpl : AAValueConstantRange {
8013   using StateType = IntegerRangeState;
8014   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
8015       : AAValueConstantRange(IRP, A) {}
8016 
8017   /// See AbstractAttribute::initialize(..).
8018   void initialize(Attributor &A) override {
8019     if (A.hasSimplificationCallback(getIRPosition())) {
8020       indicatePessimisticFixpoint();
8021       return;
8022     }
8023 
8024     // Intersect a range given by SCEV.
8025     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
8026 
8027     // Intersect a range given by LVI.
8028     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
8029   }
8030 
8031   /// See AbstractAttribute::getAsStr().
8032   const std::string getAsStr() const override {
8033     std::string Str;
8034     llvm::raw_string_ostream OS(Str);
8035     OS << "range(" << getBitWidth() << ")<";
8036     getKnown().print(OS);
8037     OS << " / ";
8038     getAssumed().print(OS);
8039     OS << ">";
8040     return OS.str();
8041   }
8042 
8043   /// Helper function to get a SCEV expr for the associated value at program
8044   /// point \p I.
8045   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
8046     if (!getAnchorScope())
8047       return nullptr;
8048 
8049     ScalarEvolution *SE =
8050         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8051             *getAnchorScope());
8052 
8053     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
8054         *getAnchorScope());
8055 
8056     if (!SE || !LI)
8057       return nullptr;
8058 
8059     const SCEV *S = SE->getSCEV(&getAssociatedValue());
8060     if (!I)
8061       return S;
8062 
8063     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
8064   }
8065 
8066   /// Helper function to get a range from SCEV for the associated value at
8067   /// program point \p I.
8068   ConstantRange getConstantRangeFromSCEV(Attributor &A,
8069                                          const Instruction *I = nullptr) const {
8070     if (!getAnchorScope())
8071       return getWorstState(getBitWidth());
8072 
8073     ScalarEvolution *SE =
8074         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8075             *getAnchorScope());
8076 
8077     const SCEV *S = getSCEV(A, I);
8078     if (!SE || !S)
8079       return getWorstState(getBitWidth());
8080 
8081     return SE->getUnsignedRange(S);
8082   }
8083 
8084   /// Helper function to get a range from LVI for the associated value at
8085   /// program point \p I.
8086   ConstantRange
8087   getConstantRangeFromLVI(Attributor &A,
8088                           const Instruction *CtxI = nullptr) const {
8089     if (!getAnchorScope())
8090       return getWorstState(getBitWidth());
8091 
8092     LazyValueInfo *LVI =
8093         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8094             *getAnchorScope());
8095 
8096     if (!LVI || !CtxI)
8097       return getWorstState(getBitWidth());
8098     return LVI->getConstantRange(&getAssociatedValue(),
8099                                  const_cast<Instruction *>(CtxI));
8100   }
8101 
8102   /// Return true if \p CtxI is valid for querying outside analyses.
8103   /// This basically makes sure we do not ask intra-procedural analysis
8104   /// about a context in the wrong function or a context that violates
8105   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8106   /// if the original context of this AA is OK or should be considered invalid.
8107   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8108                                                const Instruction *CtxI,
8109                                                bool AllowAACtxI) const {
8110     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8111       return false;
8112 
8113     // Our context might be in a different function, neither intra-procedural
8114     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8115     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8116       return false;
8117 
8118     // If the context is not dominated by the value there are paths to the
8119     // context that do not define the value. This cannot be handled by
8120     // LazyValueInfo so we need to bail.
8121     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8122       InformationCache &InfoCache = A.getInfoCache();
8123       const DominatorTree *DT =
8124           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8125               *I->getFunction());
8126       return DT && DT->dominates(I, CtxI);
8127     }
8128 
8129     return true;
8130   }
8131 
8132   /// See AAValueConstantRange::getKnownConstantRange(..).
8133   ConstantRange
8134   getKnownConstantRange(Attributor &A,
8135                         const Instruction *CtxI = nullptr) const override {
8136     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8137                                                  /* AllowAACtxI */ false))
8138       return getKnown();
8139 
8140     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8141     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8142     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8143   }
8144 
8145   /// See AAValueConstantRange::getAssumedConstantRange(..).
8146   ConstantRange
8147   getAssumedConstantRange(Attributor &A,
8148                           const Instruction *CtxI = nullptr) const override {
8149     // TODO: Make SCEV use Attributor assumption.
8150     //       We may be able to bound a variable range via assumptions in
8151     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8152     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8153     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8154                                                  /* AllowAACtxI */ false))
8155       return getAssumed();
8156 
8157     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8158     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8159     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8160   }
8161 
8162   /// Helper function to create MDNode for range metadata.
8163   static MDNode *
8164   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8165                             const ConstantRange &AssumedConstantRange) {
8166     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8167                                   Ty, AssumedConstantRange.getLower())),
8168                               ConstantAsMetadata::get(ConstantInt::get(
8169                                   Ty, AssumedConstantRange.getUpper()))};
8170     return MDNode::get(Ctx, LowAndHigh);
8171   }
8172 
8173   /// Return true if \p Assumed is included in \p KnownRanges.
8174   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8175 
8176     if (Assumed.isFullSet())
8177       return false;
8178 
8179     if (!KnownRanges)
8180       return true;
8181 
8182     // If multiple ranges are annotated in IR, we give up to annotate assumed
8183     // range for now.
8184 
8185     // TODO:  If there exists a known range which containts assumed range, we
8186     // can say assumed range is better.
8187     if (KnownRanges->getNumOperands() > 2)
8188       return false;
8189 
8190     ConstantInt *Lower =
8191         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8192     ConstantInt *Upper =
8193         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8194 
8195     ConstantRange Known(Lower->getValue(), Upper->getValue());
8196     return Known.contains(Assumed) && Known != Assumed;
8197   }
8198 
8199   /// Helper function to set range metadata.
8200   static bool
8201   setRangeMetadataIfisBetterRange(Instruction *I,
8202                                   const ConstantRange &AssumedConstantRange) {
8203     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8204     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8205       if (!AssumedConstantRange.isEmptySet()) {
8206         I->setMetadata(LLVMContext::MD_range,
8207                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8208                                                  AssumedConstantRange));
8209         return true;
8210       }
8211     }
8212     return false;
8213   }
8214 
8215   /// See AbstractAttribute::manifest()
8216   ChangeStatus manifest(Attributor &A) override {
8217     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8218     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8219     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8220 
8221     auto &V = getAssociatedValue();
8222     if (!AssumedConstantRange.isEmptySet() &&
8223         !AssumedConstantRange.isSingleElement()) {
8224       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8225         assert(I == getCtxI() && "Should not annotate an instruction which is "
8226                                  "not the context instruction");
8227         if (isa<CallInst>(I) || isa<LoadInst>(I))
8228           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8229             Changed = ChangeStatus::CHANGED;
8230       }
8231     }
8232 
8233     return Changed;
8234   }
8235 };
8236 
8237 struct AAValueConstantRangeArgument final
8238     : AAArgumentFromCallSiteArguments<
8239           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8240           true /* BridgeCallBaseContext */> {
8241   using Base = AAArgumentFromCallSiteArguments<
8242       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8243       true /* BridgeCallBaseContext */>;
8244   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8245       : Base(IRP, A) {}
8246 
8247   /// See AbstractAttribute::initialize(..).
8248   void initialize(Attributor &A) override {
8249     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8250       indicatePessimisticFixpoint();
8251     } else {
8252       Base::initialize(A);
8253     }
8254   }
8255 
8256   /// See AbstractAttribute::trackStatistics()
8257   void trackStatistics() const override {
8258     STATS_DECLTRACK_ARG_ATTR(value_range)
8259   }
8260 };
8261 
8262 struct AAValueConstantRangeReturned
8263     : AAReturnedFromReturnedValues<AAValueConstantRange,
8264                                    AAValueConstantRangeImpl,
8265                                    AAValueConstantRangeImpl::StateType,
8266                                    /* PropogateCallBaseContext */ true> {
8267   using Base =
8268       AAReturnedFromReturnedValues<AAValueConstantRange,
8269                                    AAValueConstantRangeImpl,
8270                                    AAValueConstantRangeImpl::StateType,
8271                                    /* PropogateCallBaseContext */ true>;
8272   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8273       : Base(IRP, A) {}
8274 
8275   /// See AbstractAttribute::initialize(...).
8276   void initialize(Attributor &A) override {}
8277 
8278   /// See AbstractAttribute::trackStatistics()
8279   void trackStatistics() const override {
8280     STATS_DECLTRACK_FNRET_ATTR(value_range)
8281   }
8282 };
8283 
8284 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8285   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8286       : AAValueConstantRangeImpl(IRP, A) {}
8287 
8288   /// See AbstractAttribute::initialize(...).
8289   void initialize(Attributor &A) override {
8290     AAValueConstantRangeImpl::initialize(A);
8291     if (isAtFixpoint())
8292       return;
8293 
8294     Value &V = getAssociatedValue();
8295 
8296     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8297       unionAssumed(ConstantRange(C->getValue()));
8298       indicateOptimisticFixpoint();
8299       return;
8300     }
8301 
8302     if (isa<UndefValue>(&V)) {
8303       // Collapse the undef state to 0.
8304       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8305       indicateOptimisticFixpoint();
8306       return;
8307     }
8308 
8309     if (isa<CallBase>(&V))
8310       return;
8311 
8312     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8313       return;
8314 
8315     // If it is a load instruction with range metadata, use it.
8316     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8317       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8318         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8319         return;
8320       }
8321 
8322     // We can work with PHI and select instruction as we traverse their operands
8323     // during update.
8324     if (isa<SelectInst>(V) || isa<PHINode>(V))
8325       return;
8326 
8327     // Otherwise we give up.
8328     indicatePessimisticFixpoint();
8329 
8330     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8331                       << getAssociatedValue() << "\n");
8332   }
8333 
8334   bool calculateBinaryOperator(
8335       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8336       const Instruction *CtxI,
8337       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8338     Value *LHS = BinOp->getOperand(0);
8339     Value *RHS = BinOp->getOperand(1);
8340 
8341     // Simplify the operands first.
8342     bool UsedAssumedInformation = false;
8343     const auto &SimplifiedLHS = A.getAssumedSimplified(
8344         IRPosition::value(*LHS, getCallBaseContext()), *this,
8345         UsedAssumedInformation, AA::Interprocedural);
8346     if (!SimplifiedLHS.has_value())
8347       return true;
8348     if (!SimplifiedLHS.value())
8349       return false;
8350     LHS = *SimplifiedLHS;
8351 
8352     const auto &SimplifiedRHS = A.getAssumedSimplified(
8353         IRPosition::value(*RHS, getCallBaseContext()), *this,
8354         UsedAssumedInformation, AA::Interprocedural);
8355     if (!SimplifiedRHS.has_value())
8356       return true;
8357     if (!SimplifiedRHS.value())
8358       return false;
8359     RHS = *SimplifiedRHS;
8360 
8361     // TODO: Allow non integers as well.
8362     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8363       return false;
8364 
8365     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8366         *this, IRPosition::value(*LHS, getCallBaseContext()),
8367         DepClassTy::REQUIRED);
8368     QuerriedAAs.push_back(&LHSAA);
8369     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8370 
8371     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8372         *this, IRPosition::value(*RHS, getCallBaseContext()),
8373         DepClassTy::REQUIRED);
8374     QuerriedAAs.push_back(&RHSAA);
8375     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8376 
8377     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8378 
8379     T.unionAssumed(AssumedRange);
8380 
8381     // TODO: Track a known state too.
8382 
8383     return T.isValidState();
8384   }
8385 
8386   bool calculateCastInst(
8387       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8388       const Instruction *CtxI,
8389       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8390     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8391     // TODO: Allow non integers as well.
8392     Value *OpV = CastI->getOperand(0);
8393 
8394     // Simplify the operand first.
8395     bool UsedAssumedInformation = false;
8396     const auto &SimplifiedOpV = A.getAssumedSimplified(
8397         IRPosition::value(*OpV, getCallBaseContext()), *this,
8398         UsedAssumedInformation, AA::Interprocedural);
8399     if (!SimplifiedOpV.has_value())
8400       return true;
8401     if (!SimplifiedOpV.value())
8402       return false;
8403     OpV = *SimplifiedOpV;
8404 
8405     if (!OpV->getType()->isIntegerTy())
8406       return false;
8407 
8408     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8409         *this, IRPosition::value(*OpV, getCallBaseContext()),
8410         DepClassTy::REQUIRED);
8411     QuerriedAAs.push_back(&OpAA);
8412     T.unionAssumed(
8413         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8414     return T.isValidState();
8415   }
8416 
8417   bool
8418   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8419                    const Instruction *CtxI,
8420                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8421     Value *LHS = CmpI->getOperand(0);
8422     Value *RHS = CmpI->getOperand(1);
8423 
8424     // Simplify the operands first.
8425     bool UsedAssumedInformation = false;
8426     const auto &SimplifiedLHS = A.getAssumedSimplified(
8427         IRPosition::value(*LHS, getCallBaseContext()), *this,
8428         UsedAssumedInformation, AA::Interprocedural);
8429     if (!SimplifiedLHS.has_value())
8430       return true;
8431     if (!SimplifiedLHS.value())
8432       return false;
8433     LHS = *SimplifiedLHS;
8434 
8435     const auto &SimplifiedRHS = A.getAssumedSimplified(
8436         IRPosition::value(*RHS, getCallBaseContext()), *this,
8437         UsedAssumedInformation, AA::Interprocedural);
8438     if (!SimplifiedRHS.has_value())
8439       return true;
8440     if (!SimplifiedRHS.value())
8441       return false;
8442     RHS = *SimplifiedRHS;
8443 
8444     // TODO: Allow non integers as well.
8445     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8446       return false;
8447 
8448     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8449         *this, IRPosition::value(*LHS, getCallBaseContext()),
8450         DepClassTy::REQUIRED);
8451     QuerriedAAs.push_back(&LHSAA);
8452     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8453         *this, IRPosition::value(*RHS, getCallBaseContext()),
8454         DepClassTy::REQUIRED);
8455     QuerriedAAs.push_back(&RHSAA);
8456     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8457     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8458 
8459     // If one of them is empty set, we can't decide.
8460     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8461       return true;
8462 
8463     bool MustTrue = false, MustFalse = false;
8464 
8465     auto AllowedRegion =
8466         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8467 
8468     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8469       MustFalse = true;
8470 
8471     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8472       MustTrue = true;
8473 
8474     assert((!MustTrue || !MustFalse) &&
8475            "Either MustTrue or MustFalse should be false!");
8476 
8477     if (MustTrue)
8478       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8479     else if (MustFalse)
8480       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8481     else
8482       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8483 
8484     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8485                       << " " << RHSAA << "\n");
8486 
8487     // TODO: Track a known state too.
8488     return T.isValidState();
8489   }
8490 
8491   /// See AbstractAttribute::updateImpl(...).
8492   ChangeStatus updateImpl(Attributor &A) override {
8493 
8494     IntegerRangeState T(getBitWidth());
8495     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
8496       Instruction *I = dyn_cast<Instruction>(&V);
8497       if (!I || isa<CallBase>(I)) {
8498 
8499         // Simplify the operand first.
8500         bool UsedAssumedInformation = false;
8501         const auto &SimplifiedOpV = A.getAssumedSimplified(
8502             IRPosition::value(V, getCallBaseContext()), *this,
8503             UsedAssumedInformation, AA::Interprocedural);
8504         if (!SimplifiedOpV.has_value())
8505           return true;
8506         if (!SimplifiedOpV.value())
8507           return false;
8508         Value *VPtr = *SimplifiedOpV;
8509 
8510         // If the value is not instruction, we query AA to Attributor.
8511         const auto &AA = A.getAAFor<AAValueConstantRange>(
8512             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8513             DepClassTy::REQUIRED);
8514 
8515         // Clamp operator is not used to utilize a program point CtxI.
8516         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8517 
8518         return T.isValidState();
8519       }
8520 
8521       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8522       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8523         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8524           return false;
8525       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8526         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8527           return false;
8528       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8529         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8530           return false;
8531       } else {
8532         // Give up with other instructions.
8533         // TODO: Add other instructions
8534 
8535         T.indicatePessimisticFixpoint();
8536         return false;
8537       }
8538 
8539       // Catch circular reasoning in a pessimistic way for now.
8540       // TODO: Check how the range evolves and if we stripped anything, see also
8541       //       AADereferenceable or AAAlign for similar situations.
8542       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8543         if (QueriedAA != this)
8544           continue;
8545         // If we are in a stady state we do not need to worry.
8546         if (T.getAssumed() == getState().getAssumed())
8547           continue;
8548         T.indicatePessimisticFixpoint();
8549       }
8550 
8551       return T.isValidState();
8552     };
8553 
8554     if (!VisitValueCB(getAssociatedValue(), getCtxI()))
8555       return indicatePessimisticFixpoint();
8556 
8557     // Ensure that long def-use chains can't cause circular reasoning either by
8558     // introducing a cutoff below.
8559     if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
8560       return ChangeStatus::UNCHANGED;
8561     if (++NumChanges > MaxNumChanges) {
8562       LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
8563                         << " but only " << MaxNumChanges
8564                         << " are allowed to avoid cyclic reasoning.");
8565       return indicatePessimisticFixpoint();
8566     }
8567     return ChangeStatus::CHANGED;
8568   }
8569 
8570   /// See AbstractAttribute::trackStatistics()
8571   void trackStatistics() const override {
8572     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8573   }
8574 
8575   /// Tracker to bail after too many widening steps of the constant range.
8576   int NumChanges = 0;
8577 
8578   /// Upper bound for the number of allowed changes (=widening steps) for the
8579   /// constant range before we give up.
8580   static constexpr int MaxNumChanges = 5;
8581 };
8582 
8583 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8584   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8585       : AAValueConstantRangeImpl(IRP, A) {}
8586 
8587   /// See AbstractAttribute::initialize(...).
8588   ChangeStatus updateImpl(Attributor &A) override {
8589     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8590                      "not be called");
8591   }
8592 
8593   /// See AbstractAttribute::trackStatistics()
8594   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8595 };
8596 
8597 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8598   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8599       : AAValueConstantRangeFunction(IRP, A) {}
8600 
8601   /// See AbstractAttribute::trackStatistics()
8602   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8603 };
8604 
8605 struct AAValueConstantRangeCallSiteReturned
8606     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8607                                      AAValueConstantRangeImpl,
8608                                      AAValueConstantRangeImpl::StateType,
8609                                      /* IntroduceCallBaseContext */ true> {
8610   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8611       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8612                                        AAValueConstantRangeImpl,
8613                                        AAValueConstantRangeImpl::StateType,
8614                                        /* IntroduceCallBaseContext */ true>(IRP,
8615                                                                             A) {
8616   }
8617 
8618   /// See AbstractAttribute::initialize(...).
8619   void initialize(Attributor &A) override {
8620     // If it is a load instruction with range metadata, use the metadata.
8621     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8622       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8623         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8624 
8625     AAValueConstantRangeImpl::initialize(A);
8626   }
8627 
8628   /// See AbstractAttribute::trackStatistics()
8629   void trackStatistics() const override {
8630     STATS_DECLTRACK_CSRET_ATTR(value_range)
8631   }
8632 };
8633 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8634   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8635       : AAValueConstantRangeFloating(IRP, A) {}
8636 
8637   /// See AbstractAttribute::manifest()
8638   ChangeStatus manifest(Attributor &A) override {
8639     return ChangeStatus::UNCHANGED;
8640   }
8641 
8642   /// See AbstractAttribute::trackStatistics()
8643   void trackStatistics() const override {
8644     STATS_DECLTRACK_CSARG_ATTR(value_range)
8645   }
8646 };
8647 } // namespace
8648 
8649 /// ------------------ Potential Values Attribute -------------------------
8650 
8651 namespace {
8652 struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
8653   using StateType = PotentialConstantIntValuesState;
8654 
8655   AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
8656       : AAPotentialConstantValues(IRP, A) {}
8657 
8658   /// See AbstractAttribute::initialize(..).
8659   void initialize(Attributor &A) override {
8660     if (A.hasSimplificationCallback(getIRPosition()))
8661       indicatePessimisticFixpoint();
8662     else
8663       AAPotentialConstantValues::initialize(A);
8664   }
8665 
8666   bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
8667                                  bool &ContainsUndef) {
8668     SmallVector<AA::ValueAndContext> Values;
8669     bool UsedAssumedInformation = false;
8670     if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
8671                                       UsedAssumedInformation)) {
8672       if (!IRP.getAssociatedType()->isIntegerTy())
8673         return false;
8674       auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
8675           *this, IRP, DepClassTy::REQUIRED);
8676       if (!PotentialValuesAA.getState().isValidState())
8677         return false;
8678       ContainsUndef = PotentialValuesAA.getState().undefIsContained();
8679       S = PotentialValuesAA.getState().getAssumedSet();
8680       return true;
8681     }
8682 
8683     for (auto &It : Values) {
8684       if (isa<UndefValue>(It.getValue()))
8685         continue;
8686       auto *CI = dyn_cast<ConstantInt>(It.getValue());
8687       if (!CI)
8688         return false;
8689       S.insert(CI->getValue());
8690     }
8691     ContainsUndef = S.empty();
8692 
8693     return true;
8694   }
8695 
8696   /// See AbstractAttribute::getAsStr().
8697   const std::string getAsStr() const override {
8698     std::string Str;
8699     llvm::raw_string_ostream OS(Str);
8700     OS << getState();
8701     return OS.str();
8702   }
8703 
8704   /// See AbstractAttribute::updateImpl(...).
8705   ChangeStatus updateImpl(Attributor &A) override {
8706     return indicatePessimisticFixpoint();
8707   }
8708 };
8709 
8710 struct AAPotentialConstantValuesArgument final
8711     : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8712                                       AAPotentialConstantValuesImpl,
8713                                       PotentialConstantIntValuesState> {
8714   using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
8715                                                AAPotentialConstantValuesImpl,
8716                                                PotentialConstantIntValuesState>;
8717   AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
8718       : Base(IRP, A) {}
8719 
8720   /// See AbstractAttribute::initialize(..).
8721   void initialize(Attributor &A) override {
8722     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8723       indicatePessimisticFixpoint();
8724     } else {
8725       Base::initialize(A);
8726     }
8727   }
8728 
8729   /// See AbstractAttribute::trackStatistics()
8730   void trackStatistics() const override {
8731     STATS_DECLTRACK_ARG_ATTR(potential_values)
8732   }
8733 };
8734 
8735 struct AAPotentialConstantValuesReturned
8736     : AAReturnedFromReturnedValues<AAPotentialConstantValues,
8737                                    AAPotentialConstantValuesImpl> {
8738   using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
8739                                             AAPotentialConstantValuesImpl>;
8740   AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
8741       : Base(IRP, A) {}
8742 
8743   /// See AbstractAttribute::trackStatistics()
8744   void trackStatistics() const override {
8745     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8746   }
8747 };
8748 
8749 struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
8750   AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
8751       : AAPotentialConstantValuesImpl(IRP, A) {}
8752 
8753   /// See AbstractAttribute::initialize(..).
8754   void initialize(Attributor &A) override {
8755     AAPotentialConstantValuesImpl::initialize(A);
8756     if (isAtFixpoint())
8757       return;
8758 
8759     Value &V = getAssociatedValue();
8760 
8761     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8762       unionAssumed(C->getValue());
8763       indicateOptimisticFixpoint();
8764       return;
8765     }
8766 
8767     if (isa<UndefValue>(&V)) {
8768       unionAssumedWithUndef();
8769       indicateOptimisticFixpoint();
8770       return;
8771     }
8772 
8773     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8774       return;
8775 
8776     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8777       return;
8778 
8779     indicatePessimisticFixpoint();
8780 
8781     LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
8782                       << getAssociatedValue() << "\n");
8783   }
8784 
8785   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8786                                 const APInt &RHS) {
8787     return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
8788   }
8789 
8790   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8791                                  uint32_t ResultBitWidth) {
8792     Instruction::CastOps CastOp = CI->getOpcode();
8793     switch (CastOp) {
8794     default:
8795       llvm_unreachable("unsupported or not integer cast");
8796     case Instruction::Trunc:
8797       return Src.trunc(ResultBitWidth);
8798     case Instruction::SExt:
8799       return Src.sext(ResultBitWidth);
8800     case Instruction::ZExt:
8801       return Src.zext(ResultBitWidth);
8802     case Instruction::BitCast:
8803       return Src;
8804     }
8805   }
8806 
8807   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8808                                        const APInt &LHS, const APInt &RHS,
8809                                        bool &SkipOperation, bool &Unsupported) {
8810     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8811     // Unsupported is set to true when the binary operator is not supported.
8812     // SkipOperation is set to true when UB occur with the given operand pair
8813     // (LHS, RHS).
8814     // TODO: we should look at nsw and nuw keywords to handle operations
8815     //       that create poison or undef value.
8816     switch (BinOpcode) {
8817     default:
8818       Unsupported = true;
8819       return LHS;
8820     case Instruction::Add:
8821       return LHS + RHS;
8822     case Instruction::Sub:
8823       return LHS - RHS;
8824     case Instruction::Mul:
8825       return LHS * RHS;
8826     case Instruction::UDiv:
8827       if (RHS.isZero()) {
8828         SkipOperation = true;
8829         return LHS;
8830       }
8831       return LHS.udiv(RHS);
8832     case Instruction::SDiv:
8833       if (RHS.isZero()) {
8834         SkipOperation = true;
8835         return LHS;
8836       }
8837       return LHS.sdiv(RHS);
8838     case Instruction::URem:
8839       if (RHS.isZero()) {
8840         SkipOperation = true;
8841         return LHS;
8842       }
8843       return LHS.urem(RHS);
8844     case Instruction::SRem:
8845       if (RHS.isZero()) {
8846         SkipOperation = true;
8847         return LHS;
8848       }
8849       return LHS.srem(RHS);
8850     case Instruction::Shl:
8851       return LHS.shl(RHS);
8852     case Instruction::LShr:
8853       return LHS.lshr(RHS);
8854     case Instruction::AShr:
8855       return LHS.ashr(RHS);
8856     case Instruction::And:
8857       return LHS & RHS;
8858     case Instruction::Or:
8859       return LHS | RHS;
8860     case Instruction::Xor:
8861       return LHS ^ RHS;
8862     }
8863   }
8864 
8865   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8866                                            const APInt &LHS, const APInt &RHS) {
8867     bool SkipOperation = false;
8868     bool Unsupported = false;
8869     APInt Result =
8870         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8871     if (Unsupported)
8872       return false;
8873     // If SkipOperation is true, we can ignore this operand pair (L, R).
8874     if (!SkipOperation)
8875       unionAssumed(Result);
8876     return isValidState();
8877   }
8878 
8879   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8880     auto AssumedBefore = getAssumed();
8881     Value *LHS = ICI->getOperand(0);
8882     Value *RHS = ICI->getOperand(1);
8883 
8884     bool LHSContainsUndef = false, RHSContainsUndef = false;
8885     SetTy LHSAAPVS, RHSAAPVS;
8886     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
8887                                    LHSContainsUndef) ||
8888         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
8889                                    RHSContainsUndef))
8890       return indicatePessimisticFixpoint();
8891 
8892     // TODO: make use of undef flag to limit potential values aggressively.
8893     bool MaybeTrue = false, MaybeFalse = false;
8894     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8895     if (LHSContainsUndef && RHSContainsUndef) {
8896       // The result of any comparison between undefs can be soundly replaced
8897       // with undef.
8898       unionAssumedWithUndef();
8899     } else if (LHSContainsUndef) {
8900       for (const APInt &R : RHSAAPVS) {
8901         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8902         MaybeTrue |= CmpResult;
8903         MaybeFalse |= !CmpResult;
8904         if (MaybeTrue & MaybeFalse)
8905           return indicatePessimisticFixpoint();
8906       }
8907     } else if (RHSContainsUndef) {
8908       for (const APInt &L : LHSAAPVS) {
8909         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8910         MaybeTrue |= CmpResult;
8911         MaybeFalse |= !CmpResult;
8912         if (MaybeTrue & MaybeFalse)
8913           return indicatePessimisticFixpoint();
8914       }
8915     } else {
8916       for (const APInt &L : LHSAAPVS) {
8917         for (const APInt &R : RHSAAPVS) {
8918           bool CmpResult = calculateICmpInst(ICI, L, R);
8919           MaybeTrue |= CmpResult;
8920           MaybeFalse |= !CmpResult;
8921           if (MaybeTrue & MaybeFalse)
8922             return indicatePessimisticFixpoint();
8923         }
8924       }
8925     }
8926     if (MaybeTrue)
8927       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8928     if (MaybeFalse)
8929       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8930     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8931                                          : ChangeStatus::CHANGED;
8932   }
8933 
8934   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8935     auto AssumedBefore = getAssumed();
8936     Value *LHS = SI->getTrueValue();
8937     Value *RHS = SI->getFalseValue();
8938 
8939     bool UsedAssumedInformation = false;
8940     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8941                                                   UsedAssumedInformation);
8942 
8943     // Check if we only need one operand.
8944     bool OnlyLeft = false, OnlyRight = false;
8945     if (C && *C && (*C)->isOneValue())
8946       OnlyLeft = true;
8947     else if (C && *C && (*C)->isZeroValue())
8948       OnlyRight = true;
8949 
8950     bool LHSContainsUndef = false, RHSContainsUndef = false;
8951     SetTy LHSAAPVS, RHSAAPVS;
8952     if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS),
8953                                                  LHSAAPVS, LHSContainsUndef))
8954       return indicatePessimisticFixpoint();
8955 
8956     if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS),
8957                                                 RHSAAPVS, RHSContainsUndef))
8958       return indicatePessimisticFixpoint();
8959 
8960     if (OnlyLeft || OnlyRight) {
8961       // select (true/false), lhs, rhs
8962       auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
8963       auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;
8964 
8965       if (Undef)
8966         unionAssumedWithUndef();
8967       else {
8968         for (auto &It : *OpAA)
8969           unionAssumed(It);
8970       }
8971 
8972     } else if (LHSContainsUndef && RHSContainsUndef) {
8973       // select i1 *, undef , undef => undef
8974       unionAssumedWithUndef();
8975     } else {
8976       for (auto &It : LHSAAPVS)
8977         unionAssumed(It);
8978       for (auto &It : RHSAAPVS)
8979         unionAssumed(It);
8980     }
8981     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8982                                          : ChangeStatus::CHANGED;
8983   }
8984 
8985   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8986     auto AssumedBefore = getAssumed();
8987     if (!CI->isIntegerCast())
8988       return indicatePessimisticFixpoint();
8989     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8990     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8991     Value *Src = CI->getOperand(0);
8992 
8993     bool SrcContainsUndef = false;
8994     SetTy SrcPVS;
8995     if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
8996                                    SrcContainsUndef))
8997       return indicatePessimisticFixpoint();
8998 
8999     if (SrcContainsUndef)
9000       unionAssumedWithUndef();
9001     else {
9002       for (const APInt &S : SrcPVS) {
9003         APInt T = calculateCastInst(CI, S, ResultBitWidth);
9004         unionAssumed(T);
9005       }
9006     }
9007     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9008                                          : ChangeStatus::CHANGED;
9009   }
9010 
9011   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
9012     auto AssumedBefore = getAssumed();
9013     Value *LHS = BinOp->getOperand(0);
9014     Value *RHS = BinOp->getOperand(1);
9015 
9016     bool LHSContainsUndef = false, RHSContainsUndef = false;
9017     SetTy LHSAAPVS, RHSAAPVS;
9018     if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
9019                                    LHSContainsUndef) ||
9020         !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
9021                                    RHSContainsUndef))
9022       return indicatePessimisticFixpoint();
9023 
9024     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9025 
9026     // TODO: make use of undef flag to limit potential values aggressively.
9027     if (LHSContainsUndef && RHSContainsUndef) {
9028       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9029         return indicatePessimisticFixpoint();
9030     } else if (LHSContainsUndef) {
9031       for (const APInt &R : RHSAAPVS) {
9032         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9033           return indicatePessimisticFixpoint();
9034       }
9035     } else if (RHSContainsUndef) {
9036       for (const APInt &L : LHSAAPVS) {
9037         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9038           return indicatePessimisticFixpoint();
9039       }
9040     } else {
9041       for (const APInt &L : LHSAAPVS) {
9042         for (const APInt &R : RHSAAPVS) {
9043           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9044             return indicatePessimisticFixpoint();
9045         }
9046       }
9047     }
9048     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9049                                          : ChangeStatus::CHANGED;
9050   }
9051 
9052   /// See AbstractAttribute::updateImpl(...).
9053   ChangeStatus updateImpl(Attributor &A) override {
9054     Value &V = getAssociatedValue();
9055     Instruction *I = dyn_cast<Instruction>(&V);
9056 
9057     if (auto *ICI = dyn_cast<ICmpInst>(I))
9058       return updateWithICmpInst(A, ICI);
9059 
9060     if (auto *SI = dyn_cast<SelectInst>(I))
9061       return updateWithSelectInst(A, SI);
9062 
9063     if (auto *CI = dyn_cast<CastInst>(I))
9064       return updateWithCastInst(A, CI);
9065 
9066     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9067       return updateWithBinaryOperator(A, BinOp);
9068 
9069     return indicatePessimisticFixpoint();
9070   }
9071 
9072   /// See AbstractAttribute::trackStatistics()
9073   void trackStatistics() const override {
9074     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9075   }
9076 };
9077 
9078 struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
9079   AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
9080       : AAPotentialConstantValuesImpl(IRP, A) {}
9081 
9082   /// See AbstractAttribute::initialize(...).
9083   ChangeStatus updateImpl(Attributor &A) override {
9084     llvm_unreachable(
9085         "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
9086         "not be called");
9087   }
9088 
9089   /// See AbstractAttribute::trackStatistics()
9090   void trackStatistics() const override {
9091     STATS_DECLTRACK_FN_ATTR(potential_values)
9092   }
9093 };
9094 
9095 struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
9096   AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
9097       : AAPotentialConstantValuesFunction(IRP, A) {}
9098 
9099   /// See AbstractAttribute::trackStatistics()
9100   void trackStatistics() const override {
9101     STATS_DECLTRACK_CS_ATTR(potential_values)
9102   }
9103 };
9104 
9105 struct AAPotentialConstantValuesCallSiteReturned
9106     : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9107                                      AAPotentialConstantValuesImpl> {
9108   AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
9109                                             Attributor &A)
9110       : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
9111                                        AAPotentialConstantValuesImpl>(IRP, A) {}
9112 
9113   /// See AbstractAttribute::trackStatistics()
9114   void trackStatistics() const override {
9115     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9116   }
9117 };
9118 
9119 struct AAPotentialConstantValuesCallSiteArgument
9120     : AAPotentialConstantValuesFloating {
9121   AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
9122                                             Attributor &A)
9123       : AAPotentialConstantValuesFloating(IRP, A) {}
9124 
9125   /// See AbstractAttribute::initialize(..).
9126   void initialize(Attributor &A) override {
9127     AAPotentialConstantValuesImpl::initialize(A);
9128     if (isAtFixpoint())
9129       return;
9130 
9131     Value &V = getAssociatedValue();
9132 
9133     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9134       unionAssumed(C->getValue());
9135       indicateOptimisticFixpoint();
9136       return;
9137     }
9138 
9139     if (isa<UndefValue>(&V)) {
9140       unionAssumedWithUndef();
9141       indicateOptimisticFixpoint();
9142       return;
9143     }
9144   }
9145 
9146   /// See AbstractAttribute::updateImpl(...).
9147   ChangeStatus updateImpl(Attributor &A) override {
9148     Value &V = getAssociatedValue();
9149     auto AssumedBefore = getAssumed();
9150     auto &AA = A.getAAFor<AAPotentialConstantValues>(
9151         *this, IRPosition::value(V), DepClassTy::REQUIRED);
9152     const auto &S = AA.getAssumed();
9153     unionAssumed(S);
9154     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9155                                          : ChangeStatus::CHANGED;
9156   }
9157 
9158   /// See AbstractAttribute::trackStatistics()
9159   void trackStatistics() const override {
9160     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9161   }
9162 };
9163 
9164 /// ------------------------ NoUndef Attribute ---------------------------------
9165 struct AANoUndefImpl : AANoUndef {
9166   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9167 
9168   /// See AbstractAttribute::initialize(...).
9169   void initialize(Attributor &A) override {
9170     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9171       indicateOptimisticFixpoint();
9172       return;
9173     }
9174     Value &V = getAssociatedValue();
9175     if (isa<UndefValue>(V))
9176       indicatePessimisticFixpoint();
9177     else if (isa<FreezeInst>(V))
9178       indicateOptimisticFixpoint();
9179     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9180              isGuaranteedNotToBeUndefOrPoison(&V))
9181       indicateOptimisticFixpoint();
9182     else
9183       AANoUndef::initialize(A);
9184   }
9185 
9186   /// See followUsesInMBEC
9187   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9188                        AANoUndef::StateType &State) {
9189     const Value *UseV = U->get();
9190     const DominatorTree *DT = nullptr;
9191     AssumptionCache *AC = nullptr;
9192     InformationCache &InfoCache = A.getInfoCache();
9193     if (Function *F = getAnchorScope()) {
9194       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9195       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9196     }
9197     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9198     bool TrackUse = false;
9199     // Track use for instructions which must produce undef or poison bits when
9200     // at least one operand contains such bits.
9201     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9202       TrackUse = true;
9203     return TrackUse;
9204   }
9205 
9206   /// See AbstractAttribute::getAsStr().
9207   const std::string getAsStr() const override {
9208     return getAssumed() ? "noundef" : "may-undef-or-poison";
9209   }
9210 
9211   ChangeStatus manifest(Attributor &A) override {
9212     // We don't manifest noundef attribute for dead positions because the
9213     // associated values with dead positions would be replaced with undef
9214     // values.
9215     bool UsedAssumedInformation = false;
9216     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9217                         UsedAssumedInformation))
9218       return ChangeStatus::UNCHANGED;
9219     // A position whose simplified value does not have any value is
9220     // considered to be dead. We don't manifest noundef in such positions for
9221     // the same reason above.
9222     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
9223                                 AA::Interprocedural)
9224              .has_value())
9225       return ChangeStatus::UNCHANGED;
9226     return AANoUndef::manifest(A);
9227   }
9228 };
9229 
9230 struct AANoUndefFloating : public AANoUndefImpl {
9231   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9232       : AANoUndefImpl(IRP, A) {}
9233 
9234   /// See AbstractAttribute::initialize(...).
9235   void initialize(Attributor &A) override {
9236     AANoUndefImpl::initialize(A);
9237     if (!getState().isAtFixpoint())
9238       if (Instruction *CtxI = getCtxI())
9239         followUsesInMBEC(*this, A, getState(), *CtxI);
9240   }
9241 
9242   /// See AbstractAttribute::updateImpl(...).
9243   ChangeStatus updateImpl(Attributor &A) override {
9244 
9245     SmallVector<AA::ValueAndContext> Values;
9246     bool UsedAssumedInformation = false;
9247     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
9248                                       AA::AnyScope, UsedAssumedInformation)) {
9249       Values.push_back({getAssociatedValue(), getCtxI()});
9250     }
9251 
9252     StateType T;
9253     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
9254       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9255                                              DepClassTy::REQUIRED);
9256       if (this == &AA) {
9257         T.indicatePessimisticFixpoint();
9258       } else {
9259         const AANoUndef::StateType &S =
9260             static_cast<const AANoUndef::StateType &>(AA.getState());
9261         T ^= S;
9262       }
9263       return T.isValidState();
9264     };
9265 
9266     for (const auto &VAC : Values)
9267       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
9268         return indicatePessimisticFixpoint();
9269 
9270     return clampStateAndIndicateChange(getState(), T);
9271   }
9272 
9273   /// See AbstractAttribute::trackStatistics()
9274   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9275 };
9276 
9277 struct AANoUndefReturned final
9278     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9279   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9280       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9281 
9282   /// See AbstractAttribute::trackStatistics()
9283   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9284 };
9285 
9286 struct AANoUndefArgument final
9287     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9288   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9289       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9290 
9291   /// See AbstractAttribute::trackStatistics()
9292   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9293 };
9294 
9295 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9296   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9297       : AANoUndefFloating(IRP, A) {}
9298 
9299   /// See AbstractAttribute::trackStatistics()
9300   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9301 };
9302 
9303 struct AANoUndefCallSiteReturned final
9304     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9305   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9306       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9307 
9308   /// See AbstractAttribute::trackStatistics()
9309   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9310 };
9311 
9312 struct AACallEdgesImpl : public AACallEdges {
9313   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9314 
9315   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9316     return CalledFunctions;
9317   }
9318 
9319   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9320 
9321   virtual bool hasNonAsmUnknownCallee() const override {
9322     return HasUnknownCalleeNonAsm;
9323   }
9324 
9325   const std::string getAsStr() const override {
9326     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9327            std::to_string(CalledFunctions.size()) + "]";
9328   }
9329 
9330   void trackStatistics() const override {}
9331 
9332 protected:
9333   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9334     if (CalledFunctions.insert(Fn)) {
9335       Change = ChangeStatus::CHANGED;
9336       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9337                         << "\n");
9338     }
9339   }
9340 
9341   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9342     if (!HasUnknownCallee)
9343       Change = ChangeStatus::CHANGED;
9344     if (NonAsm && !HasUnknownCalleeNonAsm)
9345       Change = ChangeStatus::CHANGED;
9346     HasUnknownCalleeNonAsm |= NonAsm;
9347     HasUnknownCallee = true;
9348   }
9349 
9350 private:
9351   /// Optimistic set of functions that might be called by this position.
9352   SetVector<Function *> CalledFunctions;
9353 
9354   /// Is there any call with a unknown callee.
9355   bool HasUnknownCallee = false;
9356 
9357   /// Is there any call with a unknown callee, excluding any inline asm.
9358   bool HasUnknownCalleeNonAsm = false;
9359 };
9360 
9361 struct AACallEdgesCallSite : public AACallEdgesImpl {
9362   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9363       : AACallEdgesImpl(IRP, A) {}
9364   /// See AbstractAttribute::updateImpl(...).
9365   ChangeStatus updateImpl(Attributor &A) override {
9366     ChangeStatus Change = ChangeStatus::UNCHANGED;
9367 
9368     auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
9369       if (Function *Fn = dyn_cast<Function>(&V)) {
9370         addCalledFunction(Fn, Change);
9371       } else {
9372         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9373         setHasUnknownCallee(true, Change);
9374       }
9375 
9376       // Explore all values.
9377       return true;
9378     };
9379 
9380     SmallVector<AA::ValueAndContext> Values;
9381     // Process any value that we might call.
9382     auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
9383       bool UsedAssumedInformation = false;
9384       Values.clear();
9385       if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
9386                                         AA::AnyScope, UsedAssumedInformation)) {
9387         Values.push_back({*V, CtxI});
9388       }
9389       for (auto &VAC : Values)
9390         VisitValue(*VAC.getValue(), VAC.getCtxI());
9391     };
9392 
9393     CallBase *CB = cast<CallBase>(getCtxI());
9394 
9395     if (CB->isInlineAsm()) {
9396       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9397           !hasAssumption(*CB, "ompx_no_call_asm"))
9398         setHasUnknownCallee(false, Change);
9399       return Change;
9400     }
9401 
9402     // Process callee metadata if available.
9403     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9404       for (auto &Op : MD->operands()) {
9405         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9406         if (Callee)
9407           addCalledFunction(Callee, Change);
9408       }
9409       return Change;
9410     }
9411 
9412     // The most simple case.
9413     ProcessCalledOperand(CB->getCalledOperand(), CB);
9414 
9415     // Process callback functions.
9416     SmallVector<const Use *, 4u> CallbackUses;
9417     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9418     for (const Use *U : CallbackUses)
9419       ProcessCalledOperand(U->get(), CB);
9420 
9421     return Change;
9422   }
9423 };
9424 
9425 struct AACallEdgesFunction : public AACallEdgesImpl {
9426   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9427       : AACallEdgesImpl(IRP, A) {}
9428 
9429   /// See AbstractAttribute::updateImpl(...).
9430   ChangeStatus updateImpl(Attributor &A) override {
9431     ChangeStatus Change = ChangeStatus::UNCHANGED;
9432 
9433     auto ProcessCallInst = [&](Instruction &Inst) {
9434       CallBase &CB = cast<CallBase>(Inst);
9435 
9436       auto &CBEdges = A.getAAFor<AACallEdges>(
9437           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9438       if (CBEdges.hasNonAsmUnknownCallee())
9439         setHasUnknownCallee(true, Change);
9440       if (CBEdges.hasUnknownCallee())
9441         setHasUnknownCallee(false, Change);
9442 
9443       for (Function *F : CBEdges.getOptimisticEdges())
9444         addCalledFunction(F, Change);
9445 
9446       return true;
9447     };
9448 
9449     // Visit all callable instructions.
9450     bool UsedAssumedInformation = false;
9451     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9452                                            UsedAssumedInformation,
9453                                            /* CheckBBLivenessOnly */ true)) {
9454       // If we haven't looked at all call like instructions, assume that there
9455       // are unknown callees.
9456       setHasUnknownCallee(true, Change);
9457     }
9458 
9459     return Change;
9460   }
9461 };
9462 
9463 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9464 private:
9465   struct QuerySet {
9466     void markReachable(const Function &Fn) {
9467       Reachable.insert(&Fn);
9468       Unreachable.erase(&Fn);
9469     }
9470 
9471     /// If there is no information about the function None is returned.
9472     Optional<bool> isCachedReachable(const Function &Fn) {
9473       // Assume that we can reach the function.
9474       // TODO: Be more specific with the unknown callee.
9475       if (CanReachUnknownCallee)
9476         return true;
9477 
9478       if (Reachable.count(&Fn))
9479         return true;
9480 
9481       if (Unreachable.count(&Fn))
9482         return false;
9483 
9484       return llvm::None;
9485     }
9486 
9487     /// Set of functions that we know for sure is reachable.
9488     DenseSet<const Function *> Reachable;
9489 
9490     /// Set of functions that are unreachable, but might become reachable.
9491     DenseSet<const Function *> Unreachable;
9492 
9493     /// If we can reach a function with a call to a unknown function we assume
9494     /// that we can reach any function.
9495     bool CanReachUnknownCallee = false;
9496   };
9497 
9498   struct QueryResolver : public QuerySet {
9499     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9500                         ArrayRef<const AACallEdges *> AAEdgesList) {
9501       ChangeStatus Change = ChangeStatus::UNCHANGED;
9502 
9503       for (auto *AAEdges : AAEdgesList) {
9504         if (AAEdges->hasUnknownCallee()) {
9505           if (!CanReachUnknownCallee) {
9506             LLVM_DEBUG(dbgs()
9507                        << "[QueryResolver] Edges include unknown callee!\n");
9508             Change = ChangeStatus::CHANGED;
9509           }
9510           CanReachUnknownCallee = true;
9511           return Change;
9512         }
9513       }
9514 
9515       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9516         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9517           Change = ChangeStatus::CHANGED;
9518           markReachable(*Fn);
9519         }
9520       }
9521       return Change;
9522     }
9523 
9524     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9525                      ArrayRef<const AACallEdges *> AAEdgesList,
9526                      const Function &Fn) {
9527       Optional<bool> Cached = isCachedReachable(Fn);
9528       if (Cached)
9529         return Cached.value();
9530 
9531       // The query was not cached, thus it is new. We need to request an update
9532       // explicitly to make sure this the information is properly run to a
9533       // fixpoint.
9534       A.registerForUpdate(AA);
9535 
9536       // We need to assume that this function can't reach Fn to prevent
9537       // an infinite loop if this function is recursive.
9538       Unreachable.insert(&Fn);
9539 
9540       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9541       if (Result)
9542         markReachable(Fn);
9543       return Result;
9544     }
9545 
9546     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9547                           ArrayRef<const AACallEdges *> AAEdgesList,
9548                           const Function &Fn) const {
9549 
9550       // Handle the most trivial case first.
9551       for (auto *AAEdges : AAEdgesList) {
9552         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9553 
9554         if (Edges.count(const_cast<Function *>(&Fn)))
9555           return true;
9556       }
9557 
9558       SmallVector<const AAFunctionReachability *, 8> Deps;
9559       for (auto &AAEdges : AAEdgesList) {
9560         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9561 
9562         for (Function *Edge : Edges) {
9563           // Functions that do not call back into the module can be ignored.
9564           if (Edge->hasFnAttribute(Attribute::NoCallback))
9565             continue;
9566 
9567           // We don't need a dependency if the result is reachable.
9568           const AAFunctionReachability &EdgeReachability =
9569               A.getAAFor<AAFunctionReachability>(
9570                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9571           Deps.push_back(&EdgeReachability);
9572 
9573           if (EdgeReachability.canReach(A, Fn))
9574             return true;
9575         }
9576       }
9577 
9578       // The result is false for now, set dependencies and leave.
9579       for (auto *Dep : Deps)
9580         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9581 
9582       return false;
9583     }
9584   };
9585 
9586   /// Get call edges that can be reached by this instruction.
9587   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9588                              const Instruction &Inst,
9589                              SmallVector<const AACallEdges *> &Result) const {
9590     // Determine call like instructions that we can reach from the inst.
9591     auto CheckCallBase = [&](Instruction &CBInst) {
9592       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9593         return true;
9594 
9595       auto &CB = cast<CallBase>(CBInst);
9596       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9597           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9598 
9599       Result.push_back(&AAEdges);
9600       return true;
9601     };
9602 
9603     bool UsedAssumedInformation = false;
9604     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9605                                              UsedAssumedInformation,
9606                                              /* CheckBBLivenessOnly */ true);
9607   }
9608 
9609 public:
9610   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9611       : AAFunctionReachability(IRP, A) {}
9612 
9613   bool canReach(Attributor &A, const Function &Fn) const override {
9614     if (!isValidState())
9615       return true;
9616 
9617     const AACallEdges &AAEdges =
9618         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9619 
9620     // Attributor returns attributes as const, so this function has to be
9621     // const for users of this attribute to use it without having to do
9622     // a const_cast.
9623     // This is a hack for us to be able to cache queries.
9624     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9625     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9626                                                           {&AAEdges}, Fn);
9627 
9628     return Result;
9629   }
9630 
9631   /// Can \p CB reach \p Fn
9632   bool canReach(Attributor &A, CallBase &CB,
9633                 const Function &Fn) const override {
9634     if (!isValidState())
9635       return true;
9636 
9637     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9638         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9639 
9640     // Attributor returns attributes as const, so this function has to be
9641     // const for users of this attribute to use it without having to do
9642     // a const_cast.
9643     // This is a hack for us to be able to cache queries.
9644     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9645     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9646 
9647     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9648 
9649     return Result;
9650   }
9651 
9652   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9653                            const Function &Fn,
9654                            bool UseBackwards) const override {
9655     if (!isValidState())
9656       return true;
9657 
9658     if (UseBackwards)
9659       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9660 
9661     const auto &Reachability = A.getAAFor<AAReachability>(
9662         *this, IRPosition::function(*getAssociatedFunction()),
9663         DepClassTy::REQUIRED);
9664 
9665     SmallVector<const AACallEdges *> CallEdges;
9666     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9667     // Attributor returns attributes as const, so this function has to be
9668     // const for users of this attribute to use it without having to do
9669     // a const_cast.
9670     // This is a hack for us to be able to cache queries.
9671     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9672     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9673     if (!AllKnown) {
9674       LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges known, "
9675                            "may reach unknown callee!\n");
9676       InstQSet.CanReachUnknownCallee = true;
9677     }
9678 
9679     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9680   }
9681 
9682   /// See AbstractAttribute::updateImpl(...).
9683   ChangeStatus updateImpl(Attributor &A) override {
9684     const AACallEdges &AAEdges =
9685         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9686     ChangeStatus Change = ChangeStatus::UNCHANGED;
9687 
9688     Change |= WholeFunction.update(A, *this, {&AAEdges});
9689 
9690     for (auto &CBPair : CBQueries) {
9691       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9692           *this, IRPosition::callsite_function(*CBPair.first),
9693           DepClassTy::REQUIRED);
9694 
9695       Change |= CBPair.second.update(A, *this, {&AAEdges});
9696     }
9697 
9698     // Update the Instruction queries.
9699     if (!InstQueries.empty()) {
9700       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
9701           *this, IRPosition::function(*getAssociatedFunction()),
9702           DepClassTy::REQUIRED);
9703 
9704       // Check for local callbases first.
9705       for (auto &InstPair : InstQueries) {
9706         SmallVector<const AACallEdges *> CallEdges;
9707         bool AllKnown =
9708             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
9709         // Update will return change if we this effects any queries.
9710         if (!AllKnown) {
9711           LLVM_DEBUG(dbgs() << "[AAReachability] Not all reachable edges "
9712                                "known, may reach unknown callee!\n");
9713           InstPair.second.CanReachUnknownCallee = true;
9714         }
9715         Change |= InstPair.second.update(A, *this, CallEdges);
9716       }
9717     }
9718 
9719     return Change;
9720   }
9721 
9722   const std::string getAsStr() const override {
9723     size_t QueryCount =
9724         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9725 
9726     return "FunctionReachability [" +
9727            (canReachUnknownCallee()
9728                 ? "unknown"
9729                 : (std::to_string(WholeFunction.Reachable.size()) + "," +
9730                    std::to_string(QueryCount))) +
9731            "]";
9732   }
9733 
9734   void trackStatistics() const override {}
9735 
9736 private:
9737   bool canReachUnknownCallee() const override {
9738     return WholeFunction.CanReachUnknownCallee;
9739   }
9740 
9741   /// Used to answer if a the whole function can reacha a specific function.
9742   QueryResolver WholeFunction;
9743 
9744   /// Used to answer if a call base inside this function can reach a specific
9745   /// function.
9746   MapVector<const CallBase *, QueryResolver> CBQueries;
9747 
9748   /// This is for instruction queries than scan "forward".
9749   MapVector<const Instruction *, QueryResolver> InstQueries;
9750 };
9751 } // namespace
9752 
9753 template <typename AAType>
9754 static Optional<Constant *>
9755 askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
9756                       const IRPosition &IRP, Type &Ty) {
9757   if (!Ty.isIntegerTy())
9758     return nullptr;
9759 
9760   // This will also pass the call base context.
9761   const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
9762 
9763   Optional<Constant *> COpt = AA.getAssumedConstant(A);
9764 
9765   if (!COpt.has_value()) {
9766     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9767     return llvm::None;
9768   }
9769   if (auto *C = COpt.value()) {
9770     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9771     return C;
9772   }
9773   return nullptr;
9774 }
9775 
9776 Value *AAPotentialValues::getSingleValue(
9777     Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
9778     SmallVectorImpl<AA::ValueAndContext> &Values) {
9779   Type &Ty = *IRP.getAssociatedType();
9780   Optional<Value *> V;
9781   for (auto &It : Values) {
9782     V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
9783     if (V.has_value() && !V.value())
9784       break;
9785   }
9786   if (!V.has_value())
9787     return UndefValue::get(&Ty);
9788   return V.value();
9789 }
9790 
9791 namespace {
9792 struct AAPotentialValuesImpl : AAPotentialValues {
9793   using StateType = PotentialLLVMValuesState;
9794 
9795   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
9796       : AAPotentialValues(IRP, A) {}
9797 
9798   /// See AbstractAttribute::initialize(..).
9799   void initialize(Attributor &A) override {
9800     if (A.hasSimplificationCallback(getIRPosition())) {
9801       indicatePessimisticFixpoint();
9802       return;
9803     }
9804     Value *Stripped = getAssociatedValue().stripPointerCasts();
9805     if (isa<Constant>(Stripped)) {
9806       addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
9807                getAnchorScope());
9808       indicateOptimisticFixpoint();
9809       return;
9810     }
9811     AAPotentialValues::initialize(A);
9812   }
9813 
9814   /// See AbstractAttribute::getAsStr().
9815   const std::string getAsStr() const override {
9816     std::string Str;
9817     llvm::raw_string_ostream OS(Str);
9818     OS << getState();
9819     return OS.str();
9820   }
9821 
9822   template <typename AAType>
9823   static Optional<Value *> askOtherAA(Attributor &A,
9824                                       const AbstractAttribute &AA,
9825                                       const IRPosition &IRP, Type &Ty) {
9826     if (isa<Constant>(IRP.getAssociatedValue()))
9827       return &IRP.getAssociatedValue();
9828     Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
9829     if (!C)
9830       return llvm::None;
9831     if (C.value())
9832       if (auto *CC = AA::getWithType(**C, Ty))
9833         return CC;
9834     return nullptr;
9835   }
9836 
9837   void addValue(Attributor &A, StateType &State, Value &V,
9838                 const Instruction *CtxI, AA::ValueScope S,
9839                 Function *AnchorScope) const {
9840 
9841     IRPosition ValIRP = IRPosition::value(V);
9842     if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
9843       for (auto &U : CB->args()) {
9844         if (U.get() != &V)
9845           continue;
9846         ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
9847         break;
9848       }
9849     }
9850 
9851     Value *VPtr = &V;
9852     if (ValIRP.getAssociatedType()->isIntegerTy()) {
9853       Type &Ty = *getAssociatedType();
9854       Optional<Value *> SimpleV =
9855           askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
9856       if (SimpleV.has_value() && !SimpleV.value()) {
9857         auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
9858             *this, ValIRP, DepClassTy::OPTIONAL);
9859         if (PotentialConstantsAA.isValidState()) {
9860           for (auto &It : PotentialConstantsAA.getAssumedSet()) {
9861             State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
9862           }
9863           assert(!PotentialConstantsAA.undefIsContained() &&
9864                  "Undef should be an explicit value!");
9865           return;
9866         }
9867       }
9868       if (!SimpleV.has_value())
9869         return;
9870 
9871       if (SimpleV.value())
9872         VPtr = SimpleV.value();
9873     }
9874 
9875     if (isa<ConstantInt>(VPtr))
9876       CtxI = nullptr;
9877     if (!AA::isValidInScope(*VPtr, AnchorScope))
9878       S = AA::ValueScope(S | AA::Interprocedural);
9879 
9880     State.unionAssumed({{*VPtr, CtxI}, S});
9881   }
9882 
9883   /// Helper struct to tie a value+context pair together with the scope for
9884   /// which this is the simplified version.
9885   struct ItemInfo {
9886     AA::ValueAndContext I;
9887     AA::ValueScope S;
9888   };
9889 
9890   bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
9891     SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
9892     for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
9893       if (!(CS & S))
9894         continue;
9895 
9896       bool UsedAssumedInformation = false;
9897       SmallVector<AA::ValueAndContext> Values;
9898       if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
9899                                         UsedAssumedInformation))
9900         return false;
9901 
9902       for (auto &It : Values)
9903         ValueScopeMap[It] += CS;
9904     }
9905     for (auto &It : ValueScopeMap)
9906       addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
9907                AA::ValueScope(It.second), getAnchorScope());
9908 
9909     return true;
9910   }
9911 
9912   void giveUpOnIntraprocedural(Attributor &A) {
9913     auto NewS = StateType::getBestState(getState());
9914     for (auto &It : getAssumedSet()) {
9915       if (It.second == AA::Intraprocedural)
9916         continue;
9917       addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
9918                AA::Interprocedural, getAnchorScope());
9919     }
9920     assert(!undefIsContained() && "Undef should be an explicit value!");
9921     addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
9922              getAnchorScope());
9923     getState() = NewS;
9924   }
9925 
9926   /// See AbstractState::indicatePessimisticFixpoint(...).
9927   ChangeStatus indicatePessimisticFixpoint() override {
9928     getState() = StateType::getBestState(getState());
9929     getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
9930     AAPotentialValues::indicateOptimisticFixpoint();
9931     return ChangeStatus::CHANGED;
9932   }
9933 
9934   /// See AbstractAttribute::updateImpl(...).
9935   ChangeStatus updateImpl(Attributor &A) override {
9936     return indicatePessimisticFixpoint();
9937   }
9938 
9939   /// See AbstractAttribute::manifest(...).
9940   ChangeStatus manifest(Attributor &A) override {
9941     SmallVector<AA::ValueAndContext> Values;
9942     for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
9943       Values.clear();
9944       if (!getAssumedSimplifiedValues(A, Values, S))
9945         continue;
9946       Value &OldV = getAssociatedValue();
9947       if (isa<UndefValue>(OldV))
9948         continue;
9949       Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
9950       if (!NewV || NewV == &OldV)
9951         continue;
9952       if (getCtxI() &&
9953           !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
9954         continue;
9955       if (A.changeAfterManifest(getIRPosition(), *NewV))
9956         return ChangeStatus::CHANGED;
9957     }
9958     return ChangeStatus::UNCHANGED;
9959   }
9960 
9961   bool getAssumedSimplifiedValues(Attributor &A,
9962                                   SmallVectorImpl<AA::ValueAndContext> &Values,
9963                                   AA::ValueScope S) const override {
9964     if (!isValidState())
9965       return false;
9966     for (auto &It : getAssumedSet())
9967       if (It.second & S)
9968         Values.push_back(It.first);
9969     assert(!undefIsContained() && "Undef should be an explicit value!");
9970     return true;
9971   }
9972 };
9973 
9974 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
9975   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
9976       : AAPotentialValuesImpl(IRP, A) {}
9977 
9978   /// See AbstractAttribute::updateImpl(...).
9979   ChangeStatus updateImpl(Attributor &A) override {
9980     auto AssumedBefore = getAssumed();
9981 
9982     genericValueTraversal(A);
9983 
9984     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
9985                                            : ChangeStatus::CHANGED;
9986   }
9987 
9988   /// Helper struct to remember which AAIsDead instances we actually used.
9989   struct LivenessInfo {
9990     const AAIsDead *LivenessAA = nullptr;
9991     bool AnyDead = false;
9992   };
9993 
9994   /// Check if \p Cmp is a comparison we can simplify.
9995   ///
9996   /// We handle multiple cases, one in which at least one operand is an
9997   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
9998   /// operand. Return true if successful, in that case Worklist will be updated.
9999   bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II,
10000                  SmallVectorImpl<ItemInfo> &Worklist) {
10001     Value *LHS = Cmp.getOperand(0);
10002     Value *RHS = Cmp.getOperand(1);
10003 
10004     // Simplify the operands first.
10005     bool UsedAssumedInformation = false;
10006     const auto &SimplifiedLHS = A.getAssumedSimplified(
10007         IRPosition::value(*LHS, getCallBaseContext()), *this,
10008         UsedAssumedInformation, AA::Intraprocedural);
10009     if (!SimplifiedLHS.has_value())
10010       return true;
10011     if (!SimplifiedLHS.value())
10012       return false;
10013     LHS = *SimplifiedLHS;
10014 
10015     const auto &SimplifiedRHS = A.getAssumedSimplified(
10016         IRPosition::value(*RHS, getCallBaseContext()), *this,
10017         UsedAssumedInformation, AA::Intraprocedural);
10018     if (!SimplifiedRHS.has_value())
10019       return true;
10020     if (!SimplifiedRHS.value())
10021       return false;
10022     RHS = *SimplifiedRHS;
10023 
10024     LLVMContext &Ctx = Cmp.getContext();
10025     // Handle the trivial case first in which we don't even need to think about
10026     // null or non-null.
10027     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
10028       Constant *NewV =
10029           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
10030       addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
10031                getAnchorScope());
10032       return true;
10033     }
10034 
10035     // From now on we only handle equalities (==, !=).
10036     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
10037     if (!ICmp || !ICmp->isEquality())
10038       return false;
10039 
10040     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
10041     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
10042     if (!LHSIsNull && !RHSIsNull)
10043       return false;
10044 
10045     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
10046     // non-nullptr operand and if we assume it's non-null we can conclude the
10047     // result of the comparison.
10048     assert((LHSIsNull || RHSIsNull) &&
10049            "Expected nullptr versus non-nullptr comparison at this point");
10050 
10051     // The index is the operand that we assume is not null.
10052     unsigned PtrIdx = LHSIsNull;
10053     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
10054         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
10055         DepClassTy::REQUIRED);
10056     if (!PtrNonNullAA.isAssumedNonNull())
10057       return false;
10058 
10059     // The new value depends on the predicate, true for != and false for ==.
10060     Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
10061                                       ICmp->getPredicate() == CmpInst::ICMP_NE);
10062     addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope());
10063     return true;
10064   }
10065 
10066   bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
10067                         SmallVectorImpl<ItemInfo> &Worklist) {
10068     const Instruction *CtxI = II.I.getCtxI();
10069     bool UsedAssumedInformation = false;
10070 
10071     Optional<Constant *> C =
10072         A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
10073     bool NoValueYet = !C.has_value();
10074     if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
10075       return true;
10076     if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
10077       if (CI->isZero())
10078         Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10079       else
10080         Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10081     } else {
10082       // We could not simplify the condition, assume both values.
10083       Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10084       Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10085     }
10086     return true;
10087   }
10088 
10089   bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
10090                       SmallVectorImpl<ItemInfo> &Worklist) {
10091     SmallSetVector<Value *, 4> PotentialCopies;
10092     SmallSetVector<Instruction *, 4> PotentialValueOrigins;
10093     bool UsedAssumedInformation = false;
10094     if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
10095                                         PotentialValueOrigins, *this,
10096                                         UsedAssumedInformation,
10097                                         /* OnlyExact */ true)) {
10098       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Failed to get potentially "
10099                            "loaded values for load instruction "
10100                         << LI << "\n");
10101       return false;
10102     }
10103 
10104     // Do not simplify loads that are only used in llvm.assume if we cannot also
10105     // remove all stores that may feed into the load. The reason is that the
10106     // assume is probably worth something as long as the stores are around.
10107     InformationCache &InfoCache = A.getInfoCache();
10108     if (InfoCache.isOnlyUsedByAssume(LI)) {
10109       if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
10110             if (!I)
10111               return true;
10112             if (auto *SI = dyn_cast<StoreInst>(I))
10113               return A.isAssumedDead(SI->getOperandUse(0), this,
10114                                      /* LivenessAA */ nullptr,
10115                                      UsedAssumedInformation,
10116                                      /* CheckBBLivenessOnly */ false);
10117             return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
10118                                    UsedAssumedInformation,
10119                                    /* CheckBBLivenessOnly */ false);
10120           })) {
10121         LLVM_DEBUG(dbgs() << "[AAPotentialValues] Load is onl used by assumes "
10122                              "and we cannot delete all the stores: "
10123                           << LI << "\n");
10124         return false;
10125       }
10126     }
10127 
10128     // Values have to be dynamically unique or we loose the fact that a
10129     // single llvm::Value might represent two runtime values (e.g.,
10130     // stack locations in different recursive calls).
10131     const Instruction *CtxI = II.I.getCtxI();
10132     bool ScopeIsLocal = (II.S & AA::Intraprocedural);
10133     bool AllLocal = ScopeIsLocal;
10134     bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
10135       AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
10136       return AA::isDynamicallyUnique(A, *this, *PC);
10137     });
10138     if (!DynamicallyUnique) {
10139       LLVM_DEBUG(dbgs() << "[AAPotentialValues] Not all potentially loaded "
10140                            "values are dynamically unique: "
10141                         << LI << "\n");
10142       return false;
10143     }
10144 
10145     for (auto *PotentialCopy : PotentialCopies) {
10146       if (AllLocal) {
10147         Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
10148       } else {
10149         Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
10150       }
10151     }
10152     if (!AllLocal && ScopeIsLocal)
10153       addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
10154     return true;
10155   }
10156 
10157   bool handlePHINode(
10158       Attributor &A, PHINode &PHI, ItemInfo II,
10159       SmallVectorImpl<ItemInfo> &Worklist,
10160       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10161     auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
10162       LivenessInfo &LI = LivenessAAs[&F];
10163       if (!LI.LivenessAA)
10164         LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
10165                                               DepClassTy::NONE);
10166       return LI;
10167     };
10168 
10169     LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
10170     for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
10171       BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
10172       if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
10173         LI.AnyDead = true;
10174         continue;
10175       }
10176       Worklist.push_back(
10177           {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S});
10178     }
10179     return true;
10180   }
10181 
10182   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
10183   /// simplify any operand of the instruction \p I. Return true if successful,
10184   /// in that case Worklist will be updated.
10185   bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
10186                          SmallVectorImpl<ItemInfo> &Worklist) {
10187     bool SomeSimplified = false;
10188     bool UsedAssumedInformation = false;
10189 
10190     SmallVector<Value *, 8> NewOps(I.getNumOperands());
10191     int Idx = 0;
10192     for (Value *Op : I.operands()) {
10193       const auto &SimplifiedOp = A.getAssumedSimplified(
10194           IRPosition::value(*Op, getCallBaseContext()), *this,
10195           UsedAssumedInformation, AA::Intraprocedural);
10196       // If we are not sure about any operand we are not sure about the entire
10197       // instruction, we'll wait.
10198       if (!SimplifiedOp.has_value())
10199         return true;
10200 
10201       if (SimplifiedOp.value())
10202         NewOps[Idx] = SimplifiedOp.value();
10203       else
10204         NewOps[Idx] = Op;
10205 
10206       SomeSimplified |= (NewOps[Idx] != Op);
10207       ++Idx;
10208     }
10209 
10210     // We won't bother with the InstSimplify interface if we didn't simplify any
10211     // operand ourselves.
10212     if (!SomeSimplified)
10213       return false;
10214 
10215     InformationCache &InfoCache = A.getInfoCache();
10216     Function *F = I.getFunction();
10217     const auto *DT =
10218         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10219     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
10220     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10221     OptimizationRemarkEmitter *ORE = nullptr;
10222 
10223     const DataLayout &DL = I.getModule()->getDataLayout();
10224     SimplifyQuery Q(DL, TLI, DT, AC, &I);
10225     Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE);
10226     if (!NewV || NewV == &I)
10227       return false;
10228 
10229     LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
10230                       << *NewV << "\n");
10231     Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
10232     return true;
10233   }
10234 
10235   bool simplifyInstruction(
10236       Attributor &A, Instruction &I, ItemInfo II,
10237       SmallVectorImpl<ItemInfo> &Worklist,
10238       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10239     if (auto *CI = dyn_cast<CmpInst>(&I))
10240       if (handleCmp(A, *CI, II, Worklist))
10241         return true;
10242 
10243     switch (I.getOpcode()) {
10244     case Instruction::Select:
10245       return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
10246     case Instruction::PHI:
10247       return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
10248     case Instruction::Load:
10249       return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
10250     default:
10251       return handleGenericInst(A, I, II, Worklist);
10252     };
10253     return false;
10254   }
10255 
10256   void genericValueTraversal(Attributor &A) {
10257     SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
10258 
10259     Value *InitialV = &getAssociatedValue();
10260     SmallSet<AA::ValueAndContext, 16> Visited;
10261     SmallVector<ItemInfo, 16> Worklist;
10262     Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
10263 
10264     int Iteration = 0;
10265     do {
10266       ItemInfo II = Worklist.pop_back_val();
10267       Value *V = II.I.getValue();
10268       assert(V);
10269       const Instruction *CtxI = II.I.getCtxI();
10270       AA::ValueScope S = II.S;
10271 
10272       // Check if we should process the current value. To prevent endless
10273       // recursion keep a record of the values we followed!
10274       if (!Visited.insert(II.I).second)
10275         continue;
10276 
10277       // Make sure we limit the compile time for complex expressions.
10278       if (Iteration++ >= MaxPotentialValuesIterations) {
10279         LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
10280                           << Iteration << "!\n");
10281         addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10282         continue;
10283       }
10284 
10285       // Explicitly look through calls with a "returned" attribute if we do
10286       // not have a pointer as stripPointerCasts only works on them.
10287       Value *NewV = nullptr;
10288       if (V->getType()->isPointerTy()) {
10289         NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
10290       } else {
10291         auto *CB = dyn_cast<CallBase>(V);
10292         if (CB && CB->getCalledFunction()) {
10293           for (Argument &Arg : CB->getCalledFunction()->args())
10294             if (Arg.hasReturnedAttr()) {
10295               NewV = CB->getArgOperand(Arg.getArgNo());
10296               break;
10297             }
10298         }
10299       }
10300       if (NewV && NewV != V) {
10301         Worklist.push_back({{*NewV, CtxI}, S});
10302         continue;
10303       }
10304 
10305       if (auto *I = dyn_cast<Instruction>(V)) {
10306         if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
10307           continue;
10308       }
10309 
10310       if (V != InitialV || isa<Argument>(V))
10311         if (recurseForValue(A, IRPosition::value(*V), II.S))
10312           continue;
10313 
10314       // If we haven't stripped anything we give up.
10315       if (V == InitialV && CtxI == getCtxI()) {
10316         indicatePessimisticFixpoint();
10317         return;
10318       }
10319 
10320       addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10321     } while (!Worklist.empty());
10322 
10323     // If we actually used liveness information so we have to record a
10324     // dependence.
10325     for (auto &It : LivenessAAs)
10326       if (It.second.AnyDead)
10327         A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
10328   }
10329 
10330   /// See AbstractAttribute::trackStatistics()
10331   void trackStatistics() const override {
10332     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10333   }
10334 };
10335 
10336 struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
10337   using Base = AAPotentialValuesImpl;
10338   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
10339       : Base(IRP, A) {}
10340 
10341   /// See AbstractAttribute::initialize(..).
10342   void initialize(Attributor &A) override {
10343     auto &Arg = cast<Argument>(getAssociatedValue());
10344     if (Arg.hasPointeeInMemoryValueAttr())
10345       indicatePessimisticFixpoint();
10346   }
10347 
10348   /// See AbstractAttribute::updateImpl(...).
10349   ChangeStatus updateImpl(Attributor &A) override {
10350     auto AssumedBefore = getAssumed();
10351 
10352     unsigned CSArgNo = getCallSiteArgNo();
10353 
10354     bool UsedAssumedInformation = false;
10355     SmallVector<AA::ValueAndContext> Values;
10356     auto CallSitePred = [&](AbstractCallSite ACS) {
10357       const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo);
10358       if (CSArgIRP.getPositionKind() == IRP_INVALID)
10359         return false;
10360 
10361       if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
10362                                         AA::Interprocedural,
10363                                         UsedAssumedInformation))
10364         return false;
10365 
10366       return isValidState();
10367     };
10368 
10369     if (!A.checkForAllCallSites(CallSitePred, *this,
10370                                 /* RequireAllCallSites */ true,
10371                                 UsedAssumedInformation))
10372       return indicatePessimisticFixpoint();
10373 
10374     Function *Fn = getAssociatedFunction();
10375     bool AnyNonLocal = false;
10376     for (auto &It : Values) {
10377       if (isa<Constant>(It.getValue())) {
10378         addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10379                  getAnchorScope());
10380         continue;
10381       }
10382       if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
10383         return indicatePessimisticFixpoint();
10384 
10385       if (auto *Arg = dyn_cast<Argument>(It.getValue()))
10386         if (Arg->getParent() == Fn) {
10387           addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10388                    getAnchorScope());
10389           continue;
10390         }
10391       addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
10392                getAnchorScope());
10393       AnyNonLocal = true;
10394     }
10395     if (undefIsContained())
10396       unionAssumedWithUndef();
10397     if (AnyNonLocal)
10398       giveUpOnIntraprocedural(A);
10399 
10400     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10401                                            : ChangeStatus::CHANGED;
10402   }
10403 
10404   /// See AbstractAttribute::trackStatistics()
10405   void trackStatistics() const override {
10406     STATS_DECLTRACK_ARG_ATTR(potential_values)
10407   }
10408 };
10409 
10410 struct AAPotentialValuesReturned
10411     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
10412   using Base =
10413       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
10414   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
10415       : Base(IRP, A) {}
10416 
10417   /// See AbstractAttribute::initialize(..).
10418   void initialize(Attributor &A) override {
10419     if (A.hasSimplificationCallback(getIRPosition()))
10420       indicatePessimisticFixpoint();
10421     else
10422       AAPotentialValues::initialize(A);
10423   }
10424 
10425   ChangeStatus manifest(Attributor &A) override {
10426     // We queried AAValueSimplify for the returned values so they will be
10427     // replaced if a simplified form was found. Nothing to do here.
10428     return ChangeStatus::UNCHANGED;
10429   }
10430 
10431   ChangeStatus indicatePessimisticFixpoint() override {
10432     return AAPotentialValues::indicatePessimisticFixpoint();
10433   }
10434 
10435   /// See AbstractAttribute::trackStatistics()
10436   void trackStatistics() const override {
10437     STATS_DECLTRACK_FNRET_ATTR(potential_values)
10438   }
10439 };
10440 
10441 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
10442   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
10443       : AAPotentialValuesImpl(IRP, A) {}
10444 
10445   /// See AbstractAttribute::updateImpl(...).
10446   ChangeStatus updateImpl(Attributor &A) override {
10447     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
10448                      "not be called");
10449   }
10450 
10451   /// See AbstractAttribute::trackStatistics()
10452   void trackStatistics() const override {
10453     STATS_DECLTRACK_FN_ATTR(potential_values)
10454   }
10455 };
10456 
10457 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
10458   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
10459       : AAPotentialValuesFunction(IRP, A) {}
10460 
10461   /// See AbstractAttribute::trackStatistics()
10462   void trackStatistics() const override {
10463     STATS_DECLTRACK_CS_ATTR(potential_values)
10464   }
10465 };
10466 
10467 struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
10468   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
10469       : AAPotentialValuesImpl(IRP, A) {}
10470 
10471   /// See AbstractAttribute::updateImpl(...).
10472   ChangeStatus updateImpl(Attributor &A) override {
10473     auto AssumedBefore = getAssumed();
10474 
10475     Function *Callee = getAssociatedFunction();
10476     if (!Callee)
10477       return indicatePessimisticFixpoint();
10478 
10479     bool UsedAssumedInformation = false;
10480     auto *CB = cast<CallBase>(getCtxI());
10481     if (CB->isMustTailCall() &&
10482         !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
10483                          UsedAssumedInformation))
10484       return indicatePessimisticFixpoint();
10485 
10486     SmallVector<AA::ValueAndContext> Values;
10487     if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10488                                       Values, AA::Intraprocedural,
10489                                       UsedAssumedInformation))
10490       return indicatePessimisticFixpoint();
10491 
10492     Function *Caller = CB->getCaller();
10493 
10494     bool AnyNonLocal = false;
10495     for (auto &It : Values) {
10496       Value *V = It.getValue();
10497       Optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
10498           V, *CB, *this, UsedAssumedInformation);
10499       if (!CallerV.has_value()) {
10500         // Nothing to do as long as no value was determined.
10501         continue;
10502       }
10503       V = CallerV.value() ? CallerV.value() : V;
10504       if (AA::isDynamicallyUnique(A, *this, *V) &&
10505           AA::isValidInScope(*V, Caller)) {
10506         if (CallerV.value()) {
10507           SmallVector<AA::ValueAndContext> ArgValues;
10508           IRPosition IRP = IRPosition::value(*V);
10509           if (auto *Arg = dyn_cast<Argument>(V))
10510             if (Arg->getParent() == CB->getCalledFunction())
10511               IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
10512           if (recurseForValue(A, IRP, AA::AnyScope))
10513             continue;
10514         }
10515         addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10516       } else {
10517         AnyNonLocal = true;
10518         break;
10519       }
10520     }
10521     if (AnyNonLocal) {
10522       Values.clear();
10523       if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10524                                         Values, AA::Interprocedural,
10525                                         UsedAssumedInformation))
10526         return indicatePessimisticFixpoint();
10527       AnyNonLocal = false;
10528       getState() = PotentialLLVMValuesState::getBestState();
10529       for (auto &It : Values) {
10530         Value *V = It.getValue();
10531         if (!AA::isDynamicallyUnique(A, *this, *V))
10532           return indicatePessimisticFixpoint();
10533         if (AA::isValidInScope(*V, Caller)) {
10534           addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10535         } else {
10536           AnyNonLocal = true;
10537           addValue(A, getState(), *V, CB, AA::Interprocedural,
10538                    getAnchorScope());
10539         }
10540       }
10541       if (AnyNonLocal)
10542         giveUpOnIntraprocedural(A);
10543     }
10544     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10545                                            : ChangeStatus::CHANGED;
10546   }
10547 
10548   ChangeStatus indicatePessimisticFixpoint() override {
10549     return AAPotentialValues::indicatePessimisticFixpoint();
10550   }
10551 
10552   /// See AbstractAttribute::trackStatistics()
10553   void trackStatistics() const override {
10554     STATS_DECLTRACK_CSRET_ATTR(potential_values)
10555   }
10556 };
10557 
10558 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
10559   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
10560       : AAPotentialValuesFloating(IRP, A) {}
10561 
10562   /// See AbstractAttribute::trackStatistics()
10563   void trackStatistics() const override {
10564     STATS_DECLTRACK_CSARG_ATTR(potential_values)
10565   }
10566 };
10567 } // namespace
10568 
10569 /// ---------------------- Assumption Propagation ------------------------------
10570 namespace {
10571 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10572   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10573                        const DenseSet<StringRef> &Known)
10574       : AAAssumptionInfo(IRP, A, Known) {}
10575 
10576   bool hasAssumption(const StringRef Assumption) const override {
10577     return isValidState() && setContains(Assumption);
10578   }
10579 
10580   /// See AbstractAttribute::getAsStr()
10581   const std::string getAsStr() const override {
10582     const SetContents &Known = getKnown();
10583     const SetContents &Assumed = getAssumed();
10584 
10585     const std::string KnownStr =
10586         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10587     const std::string AssumedStr =
10588         (Assumed.isUniversal())
10589             ? "Universal"
10590             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10591 
10592     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10593   }
10594 };
10595 
10596 /// Propagates assumption information from parent functions to all of their
10597 /// successors. An assumption can be propagated if the containing function
10598 /// dominates the called function.
10599 ///
10600 /// We start with a "known" set of assumptions already valid for the associated
10601 /// function and an "assumed" set that initially contains all possible
10602 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10603 /// contents as concrete values are known. The concrete values are seeded by the
10604 /// first nodes that are either entries into the call graph, or contains no
10605 /// assumptions. Each node is updated as the intersection of the assumed state
10606 /// with all of its predecessors.
10607 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10608   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10609       : AAAssumptionInfoImpl(IRP, A,
10610                              getAssumptions(*IRP.getAssociatedFunction())) {}
10611 
10612   /// See AbstractAttribute::manifest(...).
10613   ChangeStatus manifest(Attributor &A) override {
10614     const auto &Assumptions = getKnown();
10615 
10616     // Don't manifest a universal set if it somehow made it here.
10617     if (Assumptions.isUniversal())
10618       return ChangeStatus::UNCHANGED;
10619 
10620     Function *AssociatedFunction = getAssociatedFunction();
10621 
10622     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10623 
10624     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10625   }
10626 
10627   /// See AbstractAttribute::updateImpl(...).
10628   ChangeStatus updateImpl(Attributor &A) override {
10629     bool Changed = false;
10630 
10631     auto CallSitePred = [&](AbstractCallSite ACS) {
10632       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10633           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10634           DepClassTy::REQUIRED);
10635       // Get the set of assumptions shared by all of this function's callers.
10636       Changed |= getIntersection(AssumptionAA.getAssumed());
10637       return !getAssumed().empty() || !getKnown().empty();
10638     };
10639 
10640     bool UsedAssumedInformation = false;
10641     // Get the intersection of all assumptions held by this node's predecessors.
10642     // If we don't know all the call sites then this is either an entry into the
10643     // call graph or an empty node. This node is known to only contain its own
10644     // assumptions and can be propagated to its successors.
10645     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10646                                 UsedAssumedInformation))
10647       return indicatePessimisticFixpoint();
10648 
10649     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10650   }
10651 
10652   void trackStatistics() const override {}
10653 };
10654 
10655 /// Assumption Info defined for call sites.
10656 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10657 
10658   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10659       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10660 
10661   /// See AbstractAttribute::initialize(...).
10662   void initialize(Attributor &A) override {
10663     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10664     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10665   }
10666 
10667   /// See AbstractAttribute::manifest(...).
10668   ChangeStatus manifest(Attributor &A) override {
10669     // Don't manifest a universal set if it somehow made it here.
10670     if (getKnown().isUniversal())
10671       return ChangeStatus::UNCHANGED;
10672 
10673     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10674     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10675 
10676     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10677   }
10678 
10679   /// See AbstractAttribute::updateImpl(...).
10680   ChangeStatus updateImpl(Attributor &A) override {
10681     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10682     auto &AssumptionAA =
10683         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10684     bool Changed = getIntersection(AssumptionAA.getAssumed());
10685     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10686   }
10687 
10688   /// See AbstractAttribute::trackStatistics()
10689   void trackStatistics() const override {}
10690 
10691 private:
10692   /// Helper to initialized the known set as all the assumptions this call and
10693   /// the callee contain.
10694   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10695     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10696     auto Assumptions = getAssumptions(CB);
10697     if (Function *F = IRP.getAssociatedFunction())
10698       set_union(Assumptions, getAssumptions(*F));
10699     if (Function *F = IRP.getAssociatedFunction())
10700       set_union(Assumptions, getAssumptions(*F));
10701     return Assumptions;
10702   }
10703 };
10704 } // namespace
10705 
10706 AACallGraphNode *AACallEdgeIterator::operator*() const {
10707   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10708       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10709 }
10710 
10711 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10712 
10713 const char AAReturnedValues::ID = 0;
10714 const char AANoUnwind::ID = 0;
10715 const char AANoSync::ID = 0;
10716 const char AANoFree::ID = 0;
10717 const char AANonNull::ID = 0;
10718 const char AANoRecurse::ID = 0;
10719 const char AAWillReturn::ID = 0;
10720 const char AAUndefinedBehavior::ID = 0;
10721 const char AANoAlias::ID = 0;
10722 const char AAReachability::ID = 0;
10723 const char AANoReturn::ID = 0;
10724 const char AAIsDead::ID = 0;
10725 const char AADereferenceable::ID = 0;
10726 const char AAAlign::ID = 0;
10727 const char AAInstanceInfo::ID = 0;
10728 const char AANoCapture::ID = 0;
10729 const char AAValueSimplify::ID = 0;
10730 const char AAHeapToStack::ID = 0;
10731 const char AAPrivatizablePtr::ID = 0;
10732 const char AAMemoryBehavior::ID = 0;
10733 const char AAMemoryLocation::ID = 0;
10734 const char AAValueConstantRange::ID = 0;
10735 const char AAPotentialConstantValues::ID = 0;
10736 const char AAPotentialValues::ID = 0;
10737 const char AANoUndef::ID = 0;
10738 const char AACallEdges::ID = 0;
10739 const char AAFunctionReachability::ID = 0;
10740 const char AAPointerInfo::ID = 0;
10741 const char AAAssumptionInfo::ID = 0;
10742 
10743 // Macro magic to create the static generator function for attributes that
10744 // follow the naming scheme.
10745 
10746 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10747   case IRPosition::PK:                                                         \
10748     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10749 
10750 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10751   case IRPosition::PK:                                                         \
10752     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10753     ++NumAAs;                                                                  \
10754     break;
10755 
10756 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10757   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10758     CLASS *AA = nullptr;                                                       \
10759     switch (IRP.getPositionKind()) {                                           \
10760       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10761       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10762       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10763       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10764       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10765       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10766       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10767       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10768     }                                                                          \
10769     return *AA;                                                                \
10770   }
10771 
10772 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
10773   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10774     CLASS *AA = nullptr;                                                       \
10775     switch (IRP.getPositionKind()) {                                           \
10776       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10777       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
10778       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10779       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10780       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10781       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10782       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10783       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10784     }                                                                          \
10785     return *AA;                                                                \
10786   }
10787 
10788 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
10789   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10790     CLASS *AA = nullptr;                                                       \
10791     switch (IRP.getPositionKind()) {                                           \
10792       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10793       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10794       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10795       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10796       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10797       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
10798       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10799       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10800     }                                                                          \
10801     return *AA;                                                                \
10802   }
10803 
10804 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
10805   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10806     CLASS *AA = nullptr;                                                       \
10807     switch (IRP.getPositionKind()) {                                           \
10808       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10809       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10810       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10811       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10812       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10813       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10814       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
10815       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10816     }                                                                          \
10817     return *AA;                                                                \
10818   }
10819 
10820 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
10821   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10822     CLASS *AA = nullptr;                                                       \
10823     switch (IRP.getPositionKind()) {                                           \
10824       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10825       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10826       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10827       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10828       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
10829       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
10830       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
10831       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
10832     }                                                                          \
10833     return *AA;                                                                \
10834   }
10835 
10836 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
10837 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
10838 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
10839 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
10840 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
10841 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
10842 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
10843 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
10844 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)
10845 
10846 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
10847 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
10848 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
10849 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
10850 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
10851 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
10852 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
10853 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
10854 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
10855 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
10856 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
10857 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
10858 
10859 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
10860 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
10861 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
10862 
10863 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
10864 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
10865 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
10866 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
10867 
10868 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
10869 
10870 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
10871 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
10872 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
10873 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
10874 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
10875 #undef SWITCH_PK_CREATE
10876 #undef SWITCH_PK_INV
10877