//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

template <> unsigned llvm::PotentialLLVMValuesState::MaxPotentialValues = -1;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

static cl::opt<int> MaxPotentialValuesIterations(
    "attributor-max-potential-values-iterations", cl::Hidden,
    cl::desc(
        "Maximum number of iterations we keep dismantling potential values."),
    cl::init(64));

static cl::opt<unsigned> MaxInterferingAccesses(
    "attributor-max-interfering-accesses", cl::Hidden,
    cl::desc("Maximum number of interfering accesses to "
             "check before assuming all might interfere."),
    cl::init(6));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately (see the
// sketch after the macro definitions below).
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
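//
// A minimal sketch of the multi-site usage mentioned above; the attribute
// name "nonnull" and the surrounding conditions are hypothetical, for
// illustration only:
//
//  STATS_DECL(nonnull, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, nonnull))
//  if (ManifestedViaAttribute)
//    STATS_TRACK(nonnull, Arguments)
//  if (ManifestedViaAssume)
//    STATS_TRACK(nonnull, Arguments)
//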

// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AAInstanceInfo)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialConstantValues)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)
PIPE_OPERATOR(AACallEdges)
PIPE_OPERATOR(AAFunctionReachability)
PIPE_OPERATOR(AAPointerInfo)
PIPE_OPERATOR(AAAssumptionInfo)

#undef PIPE_OPERATOR

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

} // namespace llvm

/// Checks if a type could have padding bytes.
static bool isDenselyPacked(Type *Ty, const DataLayout &DL) {
  // There is no size information, so be conservative.
  if (!Ty->isSized())
    return false;

  // If the alloc size is not equal to the storage size, then there are padding
  // bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
  if (DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty))
    return false;

  // FIXME: This isn't the right way to check for padding in vectors with
  // non-byte-size elements.
  if (VectorType *SeqTy = dyn_cast<VectorType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  // For array types, check for padding within members.
  if (ArrayType *SeqTy = dyn_cast<ArrayType>(Ty))
    return isDenselyPacked(SeqTy->getElementType(), DL);

  if (!isa<StructType>(Ty))
    return true;

  // Check for padding within and between elements of a struct.
  StructType *StructTy = cast<StructType>(Ty);
  const StructLayout *Layout = DL.getStructLayout(StructTy);
  uint64_t StartPos = 0;
  for (unsigned I = 0, E = StructTy->getNumElements(); I < E; ++I) {
    Type *ElTy = StructTy->getElementType(I);
    if (!isDenselyPacked(ElTy, DL))
      return false;
    if (StartPos != Layout->getElementOffsetInBits(I))
      return false;
    StartPos += DL.getTypeAllocSizeInBits(ElTy);
  }

  return true;
}
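
// For illustration (hypothetical types, not part of the original comment):
// under a typical data layout, `{ i8, i32 }` is rejected because the i32
// element lives at offset 32 bits while StartPos is only 8 bits after the
// leading i8, i.e., there are interior padding bytes. A packed struct such
// as `<{ i8, i32 }>` passes the element-offset check above.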

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (!AllowVolatile && I->isVolatile())
    return nullptr;

  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    return RMWI->getPointerOperand();
  }

  return nullptr;
}
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
                               int64_t Offset, IRBuilder<NoFolder> &IRB,
                               const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  if (Offset) {
    Type *Ty = PtrElemTy;
    APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
    SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);

    SmallVector<Value *, 4> ValIndices;
    std::string GEPName = Ptr->getName().str();
    for (const APInt &Index : IntIndices) {
      ValIndices.push_back(IRB.getInt(Index));
      GEPName += "." + std::to_string(Index.getZExtValue());
    }

    // Create a GEP for the indices collected above.
    Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);

    // If an offset is left we use byte-wise adjustment.
    if (IntOffset != 0) {
      Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
      Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
                          GEPName + ".b" + Twine(IntOffset.getZExtValue()));
    }
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreatePointerBitCastOrAddrSpaceCast(Ptr, ResTy,
                                                Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
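
// For illustration (hypothetical IR, not part of the original comment): given
// `%p` of type `{ i32, [4 x i8] }*` and Offset 5, the helper emits roughly
//   %p.0.1.1 = getelementptr { i32, [4 x i8] }, ptr %p, i64 0, i32 1, i64 1
// i.e., it reaches byte 5 through the natural type, and only falls back to
// the i8* GEP for whatever byte remainder the type walk cannot express.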

bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
                                     SmallSetVector<Value *, 8> &Objects,
                                     const AbstractAttribute &QueryingAA,
                                     const Instruction *CtxI,
                                     bool &UsedAssumedInformation,
                                     AA::ValueScope S,
                                     SmallPtrSetImpl<Value *> *SeenObjects) {
  SmallPtrSet<Value *, 8> LocalSeenObjects;
  if (!SeenObjects)
    SeenObjects = &LocalSeenObjects;

  SmallVector<AA::ValueAndContext> Values;
  if (!A.getAssumedSimplifiedValues(IRPosition::value(Ptr), &QueryingAA, Values,
                                    S, UsedAssumedInformation)) {
    Objects.insert(const_cast<Value *>(&Ptr));
    return true;
  }

  for (auto &VAC : Values) {
    Value *UO = getUnderlyingObject(VAC.getValue());
    if (UO && UO != VAC.getValue() && SeenObjects->insert(UO).second) {
      if (!getAssumedUnderlyingObjects(A, *UO, Objects, QueryingAA,
                                       VAC.getCtxI(), UsedAssumedInformation, S,
                                       SeenObjects))
        return false;
      continue;
    }
    Objects.insert(VAC.getValue());
  }
  return true;
}
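
// A minimal usage sketch for the query above (hypothetical caller, for
// illustration only): collect the assumed underlying objects of a pointer
// `Ptr` and inspect them all.
//
//   SmallSetVector<Value *, 8> Objects;
//   bool UsedAssumedInformation = false;
//   if (AA::getAssumedUnderlyingObjects(A, *Ptr, Objects, QueryingAA,
//                                       /* CtxI */ nullptr,
//                                       UsedAssumedInformation,
//                                       AA::Interprocedural))
//     for (Value *Obj : Objects)
//       /* inspect Obj */;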

static const Value *
stripAndAccumulateOffsets(Attributor &A, const AbstractAttribute &QueryingAA,
                          const Value *Val, const DataLayout &DL, APInt &Offset,
                          bool GetMinOffset, bool AllowNonInbounds,
                          bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         UseAssumed ? DepClassTy::OPTIONAL
                                                    : DepClassTy::NONE);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    if (Range.isFullSet())
      return false;

    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    if (GetMinOffset)
      ROffset = Range.getSignedMin();
    else
      ROffset = Range.getSignedMax();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                /* AllowInvariant */ true,
                                                AttributorAnalysis);
}

static const Value *
getMinimalBaseOfPointer(Attributor &A, const AbstractAttribute &QueryingAA,
                        const Value *Ptr, int64_t &BytesOffset,
                        const DataLayout &DL, bool AllowNonInbounds = false) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base =
      stripAndAccumulateOffsets(A, QueryingAA, Ptr, DL, OffsetAPInt,
                                /* GetMinOffset */ true, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(
    Attributor &A, const AAType &QueryingAA, StateType &S,
    const IRPosition::CallBaseContext *CBContext = nullptr) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV, CBContext);
    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

namespace {
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool PropagateCallBaseContext = false>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(
        A, *this, S,
        PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all that exist.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA =
        A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (!T)
      T = StateType::getBestState(AAS);
    *T &= AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool UsedAssumedInformation = false;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              UsedAssumedInformation))
    S.indicatePessimisticFixpoint();
  else if (T)
    S ^= *T;
}

/// This function is the bridge between argument position and the call base
/// context.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
bool getArgumentStateFromCallBaseContext(Attributor &A,
                                         BaseType &QueryingAttribute,
                                         IRPosition &Pos, StateType &State) {
  assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
  const CallBase *CBContext = Pos.getCallBaseContext();
  if (!CBContext)
    return false;

  int ArgNo = Pos.getCallSiteArgNo();
  assert(ArgNo >= 0 && "Invalid Arg No!");

  const auto &AA = A.getAAFor<AAType>(
      QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
      DepClassTy::REQUIRED);
  const StateType &CBArgumentState =
      static_cast<const StateType &>(AA.getState());

  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call base context to argument. "
                    << "Position: " << Pos << " CB Arg state: "
                    << CBArgumentState << "\n");

  // NOTE: If we want to do call site grouping it should happen here.
  State ^= CBArgumentState;
  return true;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType,
          bool BridgeCallBaseContext = false>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S = StateType::getBestState(this->getState());

    if (BridgeCallBaseContext) {
      bool Success =
          getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
              A, *this, this->getIRPosition(), S);
      if (Success)
        return clampStateAndIndicateChange<StateType>(this->getState(), S);
    }
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType,
          bool IntroduceCallBaseContext = false>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    CallBase &CBContext = cast<CallBase>(this->getAnchorValue());
    if (IntroduceCallBaseContext)
      LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
                        << CBContext << "\n");

    IRPosition FnPos = IRPosition::returned(
        *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
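///
/// A minimal sketch of such a method (the surrounding attribute and the
/// chosen state update are hypothetical, for illustration only):
///
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        BooleanState &State) {
///     // A load through the use proves the pointer was dereferenced here,
///     // so strengthen the state and keep following the user's uses.
///     if (const auto *LI = dyn_cast<LoadInst>(I))
///       if (LI->getPointerOperand() == U->get()) {
///         State.setKnown(true);
///         return true;
///       }
///     return false;
///   }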
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}
} // namespace

/// ------------------------ PointerInfo ---------------------------------------

namespace llvm {
namespace AA {
namespace PointerInfo {

struct State;

} // namespace PointerInfo
} // namespace AA

/// Helper for AA::PointerInfo::Access DenseMap/Set usage.
template <>
struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

/// Helper that allows OffsetAndSize as a key in a DenseMap.
template <>
struct DenseMapInfo<AAPointerInfo::OffsetAndSize>
    : DenseMapInfo<std::pair<int64_t, int64_t>> {};

/// Helper for AA::PointerInfo::Access DenseMap/Set usage, ignoring everything
/// but the instruction.
struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
  using Base = DenseMapInfo<Instruction *>;
  using Access = AAPointerInfo::Access;
  static inline Access getEmptyKey();
  static inline Access getTombstoneKey();
  static unsigned getHashValue(const Access &A);
  static bool isEqual(const Access &LHS, const Access &RHS);
};

} // namespace llvm

/// A type to track pointer/struct usage and accesses for AAPointerInfo.
struct AA::PointerInfo::State : public AbstractState {

  ~State() {
    // The Accesses objects live in the Attributor's bump allocator (see
    // addAccess below), so we do not delete them, but we still need to run
    // their destructors.
    for (auto &It : AccessBins)
      It.second->~Accesses();
  }

  /// Return the best possible representable state.
  static State getBestState(const State &SIS) { return State(); }

  /// Return the worst possible representable state.
  static State getWorstState(const State &SIS) {
    State R;
    R.indicatePessimisticFixpoint();
    return R;
  }

  State() = default;
  State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {
    SIS.AccessBins.clear();
  }

  const State &getAssumed() const { return *this; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// See AbstractState::indicateOptimisticFixpoint().
  ChangeStatus indicateOptimisticFixpoint() override {
    BS.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint().
  ChangeStatus indicatePessimisticFixpoint() override {
    BS.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  State &operator=(const State &R) {
    if (this == &R)
      return *this;
    BS = R.BS;
    AccessBins = R.AccessBins;
    return *this;
  }

  State &operator=(State &&R) {
    if (this == &R)
      return *this;
    std::swap(BS, R.BS);
    std::swap(AccessBins, R.AccessBins);
    return *this;
  }

  bool operator==(const State &R) const {
    if (BS != R.BS)
      return false;
    if (AccessBins.size() != R.AccessBins.size())
      return false;
    auto It = begin(), RIt = R.begin(), E = end();
    while (It != E) {
      if (It->getFirst() != RIt->getFirst())
        return false;
      auto &Accs = It->getSecond();
      auto &RAccs = RIt->getSecond();
      if (Accs->size() != RAccs->size())
        return false;
      for (const auto &ZipIt : llvm::zip(*Accs, *RAccs))
        if (std::get<0>(ZipIt) != std::get<1>(ZipIt))
          return false;
      ++It;
      ++RIt;
    }
    return true;
  }
  bool operator!=(const State &R) const { return !(*this == R); }

  /// We store accesses in a vector and index them by their instruction via a
  /// map, so lookups by instruction stay cheap.
  struct Accesses {
    SmallVector<AAPointerInfo::Access, 4> Accesses;
    DenseMap<const Instruction *, unsigned> Map;

    unsigned size() const { return Accesses.size(); }

    using vec_iterator = decltype(Accesses)::iterator;
    vec_iterator begin() { return Accesses.begin(); }
    vec_iterator end() { return Accesses.end(); }

    using iterator = decltype(Map)::const_iterator;
    iterator find(AAPointerInfo::Access &Acc) {
      return Map.find(Acc.getRemoteInst());
    }
    iterator find_end() { return Map.end(); }

    AAPointerInfo::Access &get(iterator &It) {
      return Accesses[It->getSecond()];
    }

    void insert(AAPointerInfo::Access &Acc) {
      Map[Acc.getRemoteInst()] = Accesses.size();
      Accesses.push_back(Acc);
    }
  };
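
  // A minimal usage sketch for the container above (hypothetical values, for
  // illustration only): insert a new access, or combine it with an existing
  // one for the same remote instruction, mirroring what addAccess below does.
  //
  //   auto It = Bin.find(Acc);
  //   if (It == Bin.find_end())
  //     Bin.insert(Acc);
  //   else
  //     Bin.get(It) &= Acc;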

  /// We store all accesses in bins denoted by their offset and size.
  using AccessBinsTy = DenseMap<AAPointerInfo::OffsetAndSize, Accesses *>;

  AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
  AccessBinsTy::const_iterator end() const { return AccessBins.end(); }

protected:
  /// The bins with all the accesses for the associated pointer.
  AccessBinsTy AccessBins;

  /// Add a new access to the state at offset \p Offset and with size \p Size.
  /// The access is associated with \p I, writes \p Content (if anything), and
  /// is of kind \p Kind.
  /// \returns CHANGED, if the state changed, UNCHANGED otherwise.
  ChangeStatus addAccess(Attributor &A, int64_t Offset, int64_t Size,
                         Instruction &I, Optional<Value *> Content,
                         AAPointerInfo::AccessKind Kind, Type *Ty,
                         Instruction *RemoteI = nullptr,
                         Accesses *BinPtr = nullptr) {
    AAPointerInfo::OffsetAndSize Key{Offset, Size};
    Accesses *&Bin = BinPtr ? BinPtr : AccessBins[Key];
    if (!Bin)
      Bin = new (A.Allocator) Accesses;
    AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
    // Check if we have an access for this instruction in this bin; if not,
    // simply add it.
    auto It = Bin->find(Acc);
    if (It == Bin->find_end()) {
      Bin->insert(Acc);
      return ChangeStatus::CHANGED;
    }
    // If the existing access is the same as the new one, nothing changed.
    AAPointerInfo::Access &Current = Bin->get(It);
    AAPointerInfo::Access Before = Current;
    // The new one will be combined with the existing one.
    Current &= Acc;
    return Current == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      AAPointerInfo::OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    for (auto &It : AccessBins) {
      AAPointerInfo::OffsetAndSize ItOAS = It.getFirst();
      if (!OAS.mayOverlap(ItOAS))
        continue;
      bool IsExact = OAS == ItOAS && !OAS.offsetOrSizeAreUnknown();
      for (auto &Access : *It.getSecond())
        if (!CB(Access, IsExact))
          return false;
    }
    return true;
  }

  /// See AAPointerInfo::forallInterferingAccesses.
  bool forallInterferingAccesses(
      Instruction &I,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
    if (!isValidState())
      return false;

    // First find the offset and size of I.
    AAPointerInfo::OffsetAndSize OAS(-1, -1);
    for (auto &It : AccessBins) {
      for (auto &Access : *It.getSecond()) {
        if (Access.getRemoteInst() == &I) {
          OAS = It.getFirst();
          break;
        }
      }
      if (OAS.getSize() != -1)
        break;
    }
    // No access for I was found, we are done.
    if (OAS.getSize() == -1)
      return true;

    // Now that we have an offset and size, find all overlapping ones and use
    // the callback on the accesses.
    return forallInterferingAccesses(OAS, CB);
  }

private:
  /// State to track fixpoint and validity.
  BooleanState BS;
};

namespace {
struct AAPointerInfoImpl
    : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
  using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
  AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return std::string("PointerInfo ") +
           (isValidState() ? (std::string("#") +
                              std::to_string(AccessBins.size()) + " bins")
                           : "<invalid>");
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAPointerInfo::manifest(A);
  }

  bool forallInterferingAccesses(
      OffsetAndSize OAS,
      function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
      const override {
    return State::forallInterferingAccesses(OAS, CB);
  }
  bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      function_ref<bool(const Access &, bool)> UserCB) const override {
    SmallPtrSet<const Access *, 8> DominatingWrites;
    SmallVector<std::pair<const Access *, bool>, 8> InterferingAccesses;

    Function &Scope = *I.getFunction();
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);
    const auto *ExecDomainAA = A.lookupAAFor<AAExecutionDomain>(
        IRPosition::function(Scope), &QueryingAA, DepClassTy::OPTIONAL);
    const bool NoSync = NoSyncAA.isAssumedNoSync();

    // Helper to determine if we need to consider threading, which we cannot
    // do right now. However, if the function is (assumed) nosync or all
    // instructions are executed by the initial thread only, we can ignore
    // threading.
    auto CanIgnoreThreading = [&](const Instruction &I) -> bool {
      if (NoSync)
        return true;
      if (ExecDomainAA && ExecDomainAA->isExecutedByInitialThreadOnly(I))
        return true;
      return false;
    };

    // Helper to determine if the access is executed by the same thread as the
    // load; for now it is sufficient to avoid any potential threading effects
    // as we cannot deal with them anyway.
    auto IsSameThreadAsLoad = [&](const Access &Acc) -> bool {
      return CanIgnoreThreading(*Acc.getLocalInst());
    };

    // TODO: Use inter-procedural reachability and dominance.
    const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
        QueryingAA, IRPosition::function(Scope), DepClassTy::OPTIONAL);

    const bool FindInterferingWrites = I.mayReadFromMemory();
    const bool FindInterferingReads = I.mayWriteToMemory();
    const bool UseDominanceReasoning = FindInterferingWrites;
    const bool CanUseCFGReasoning = CanIgnoreThreading(I);
    InformationCache &InfoCache = A.getInfoCache();
    const DominatorTree *DT =
        NoRecurseAA.isKnownNoRecurse() && UseDominanceReasoning
            ? InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  Scope)
            : nullptr;

    enum GPUAddressSpace : unsigned {
      Generic = 0,
      Global = 1,
      Shared = 3,
      Constant = 4,
      Local = 5,
    };

    // Helper to check if a value has "kernel lifetime", that is it will not
    // outlive a GPU kernel. This is true for shared, constant, and local
    // globals on AMD and NVIDIA GPUs.
    auto HasKernelLifetime = [&](Value *V, Module &M) {
      Triple T(M.getTargetTriple());
      if (!(T.isAMDGPU() || T.isNVPTX()))
        return false;
      switch (V->getType()->getPointerAddressSpace()) {
      case GPUAddressSpace::Shared:
      case GPUAddressSpace::Constant:
      case GPUAddressSpace::Local:
        return true;
      default:
        return false;
      }
    };

    // The IsLiveInCalleeCB will be used by the AA::isPotentiallyReachable query
    // to determine if we should look at reachability from the callee. For
    // certain pointers we know the lifetime and we do not have to step into the
    // callee to determine reachability as the pointer would be dead in the
    // callee. See the conditional initialization below.
    std::function<bool(const Function &)> IsLiveInCalleeCB;

    if (auto *AI = dyn_cast<AllocaInst>(&getAssociatedValue())) {
      // If the alloca containing function is not recursive the alloca
      // must be dead in the callee.
      const Function *AIFn = AI->getFunction();
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*AIFn), DepClassTy::OPTIONAL);
      if (NoRecurseAA.isAssumedNoRecurse()) {
        IsLiveInCalleeCB = [AIFn](const Function &Fn) { return AIFn != &Fn; };
      }
    } else if (auto *GV = dyn_cast<GlobalValue>(&getAssociatedValue())) {
      // If the global has kernel lifetime we can stop if we reach a kernel
      // as it is "dead" in the (unknown) callees.
      if (HasKernelLifetime(GV, *GV->getParent()))
        IsLiveInCalleeCB = [](const Function &Fn) {
          return !Fn.hasFnAttribute("kernel");
        };
    }

    auto AccessCB = [&](const Access &Acc, bool Exact) {
      if ((!FindInterferingWrites || !Acc.isWrite()) &&
          (!FindInterferingReads || !Acc.isRead()))
        return true;

      // For now we only filter accesses based on CFG reasoning, which does not
      // work yet if we have threading effects or the access is complicated.
      if (CanUseCFGReasoning) {
        if ((!Acc.isWrite() ||
             !AA::isPotentiallyReachable(A, *Acc.getLocalInst(), I, QueryingAA,
                                         IsLiveInCalleeCB)) &&
            (!Acc.isRead() ||
             !AA::isPotentiallyReachable(A, I, *Acc.getLocalInst(), QueryingAA,
                                         IsLiveInCalleeCB)))
          return true;
        if (DT && Exact && (Acc.getLocalInst()->getFunction() == &Scope) &&
            IsSameThreadAsLoad(Acc)) {
          if (DT->dominates(Acc.getLocalInst(), &I))
            DominatingWrites.insert(&Acc);
        }
      }

      InterferingAccesses.push_back({&Acc, Exact});
      return true;
    };
    if (!State::forallInterferingAccesses(I, AccessCB))
      return false;

    // If we cannot use CFG reasoning we only filter the non-write accesses
    // and are done here.
    if (!CanUseCFGReasoning) {
      for (auto &It : InterferingAccesses)
        if (!UserCB(*It.first, It.second))
          return false;
      return true;
    }

    // Helper to determine if we can skip a specific write access. This is in
    // the worst case quadratic as we are looking for another write that will
    // hide the effect of this one.
    auto CanSkipAccess = [&](const Access &Acc, bool Exact) {
      if (!IsSameThreadAsLoad(Acc))
        return false;
      if (!DominatingWrites.count(&Acc))
        return false;
      for (const Access *DomAcc : DominatingWrites) {
        assert(Acc.getLocalInst()->getFunction() ==
                   DomAcc->getLocalInst()->getFunction() &&
               "Expected dominating writes to be in the same function!");

        if (DomAcc != &Acc &&
            DT->dominates(Acc.getLocalInst(), DomAcc->getLocalInst())) {
          return true;
        }
      }
      return false;
    };

    // Run the user callback on all accesses we cannot skip and return if that
    // succeeded for all or not.
    unsigned NumInterferingAccesses = InterferingAccesses.size();
    for (auto &It : InterferingAccesses) {
      if (!DT || NumInterferingAccesses > MaxInterferingAccesses ||
          !CanSkipAccess(*It.first, It.second)) {
        if (!UserCB(*It.first, It.second))
          return false;
      }
    }
    return true;
  }

  ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
                                    int64_t Offset, CallBase &CB,
                                    bool FromCallee = false) {
    using namespace AA::PointerInfo;
    if (!OtherAA.getState().isValidState() || !isValidState())
      return indicatePessimisticFixpoint();

    const auto &OtherAAImpl = static_cast<const AAPointerInfoImpl &>(OtherAA);
    bool IsByval =
        FromCallee && OtherAAImpl.getAssociatedArgument()->hasByValAttr();

    // Combine the accesses bin by bin.
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    for (auto &It : OtherAAImpl.getState()) {
      OffsetAndSize OAS = OffsetAndSize::getUnknown();
      if (Offset != OffsetAndSize::Unknown)
        OAS = OffsetAndSize(It.first.getOffset() + Offset, It.first.getSize());
      Accesses *Bin = AccessBins.lookup(OAS);
      for (const AAPointerInfo::Access &RAcc : *It.second) {
        if (IsByval && !RAcc.isRead())
          continue;
        bool UsedAssumedInformation = false;
        AccessKind AK = RAcc.getKind();
        Optional<Value *> Content = RAcc.getContent();
        if (FromCallee) {
          Content = A.translateArgumentToCallSiteContent(
              RAcc.getContent(), CB, *this, UsedAssumedInformation);
          AK = AccessKind(
              AK & (IsByval ? AccessKind::AK_READ : AccessKind::AK_READ_WRITE));
        }
        Changed =
            Changed | addAccess(A, OAS.getOffset(), OAS.getSize(), CB, Content,
                                AK, RAcc.getType(), RAcc.getRemoteInst(), Bin);
      }
    }
    return Changed;
  }

  /// Statistic tracking for all AAPointerInfo implementations.
  /// See AbstractAttribute::trackStatistics().
  void trackPointerInfoStatistics(const IRPosition &IRP) const {}
};

struct AAPointerInfoFloating : public AAPointerInfoImpl {
  using AccessKind = AAPointerInfo::AccessKind;
  AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
      : AAPointerInfoImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }

  /// Deal with an access and signal if it was handled successfully.
  bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
                    Optional<Value *> Content, AccessKind Kind, int64_t Offset,
                    ChangeStatus &Changed, Type *Ty,
                    int64_t Size = OffsetAndSize::Unknown) {
    using namespace AA::PointerInfo;
    // No need to find a size if one is given or the offset is unknown.
    if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
        Ty) {
      const DataLayout &DL = A.getDataLayout();
      TypeSize AccessSize = DL.getTypeStoreSize(Ty);
      if (!AccessSize.isScalable())
        Size = AccessSize.getFixedSize();
    }
    Changed = Changed | addAccess(A, Offset, Size, I, Content, Kind, Ty);
    return true;
  }

  /// Helper struct, will support ranges eventually.
  struct OffsetInfo {
    int64_t Offset = OffsetAndSize::Unknown;

    bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
  };
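
  // For illustration (hypothetical IR, not part of the original comment): for
  //   %g = getelementptr i32, ptr %p, i64 4
  //   %l = load i32, ptr %g
  // the update below seeds OffsetInfoMap[%p] with offset 0, derives offset 16
  // for %g from the constant GEP index, and finally records a 4-byte read at
  // offset 16 when it reaches the load.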
1210 
1211   /// See AbstractAttribute::updateImpl(...).
1212   ChangeStatus updateImpl(Attributor &A) override {
1213     using namespace AA::PointerInfo;
1214     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1215     Value &AssociatedValue = getAssociatedValue();
1216 
1217     const DataLayout &DL = A.getDataLayout();
1218     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1219     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1220 
1221     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo PtrOI,
1222                                      bool &Follow) {
1223       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1224       UsrOI = PtrOI;
1225       Follow = true;
1226       return true;
1227     };
1228 
1229     const auto *TLI = getAnchorScope()
1230                           ? A.getInfoCache().getTargetLibraryInfoForFunction(
1231                                 *getAnchorScope())
1232                           : nullptr;
1233     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1234       Value *CurPtr = U.get();
1235       User *Usr = U.getUser();
1236       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1237                         << *Usr << "\n");
1238       assert(OffsetInfoMap.count(CurPtr) &&
1239              "The current pointer offset should have been seeded!");
1240 
1241       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1242         if (CE->isCast())
1243           return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1244         if (CE->isCompare())
1245           return true;
1246         if (!isa<GEPOperator>(CE)) {
1247           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1248                             << "\n");
1249           return false;
1250         }
1251       }
1252       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1253         // Note the order here, the Usr access might change the map, CurPtr is
1254         // already in it though.
1255         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1256         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1257         UsrOI = PtrOI;
1258 
1259         // TODO: Use range information.
1260         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1261             !GEP->hasAllConstantIndices()) {
1262           UsrOI.Offset = OffsetAndSize::Unknown;
1263           Follow = true;
1264           return true;
1265         }
1266 
1267         SmallVector<Value *, 8> Indices;
1268         for (Use &Idx : GEP->indices()) {
1269           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1270             Indices.push_back(CIdx);
1271             continue;
1272           }
1273 
1274           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1275                             << " : " << *Idx << "\n");
1276           return false;
1277         }
1278         UsrOI.Offset = PtrOI.Offset + DL.getIndexedOffsetInType(
1279                                           GEP->getSourceElementType(), Indices);
1280         Follow = true;
1281         return true;
1282       }
1283       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1284         return HandlePassthroughUser(Usr, OffsetInfoMap[CurPtr], Follow);
1285 
1286       // For PHIs we need to take care of the recurrence explicitly as the value
1287       // might change while we iterate through a loop. For now, we give up if
1288       // the PHI is not invariant.
1289       if (isa<PHINode>(Usr)) {
1290         // Note the order here, the Usr access might change the map, CurPtr is
1291         // already in it though.
1292         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1293         OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1294         // Check if the PHI is invariant (so far).
1295         if (UsrOI == PtrOI)
1296           return true;
1297 
1298         // Check if the PHI operand has already an unknown offset as we can't
1299         // improve on that anymore.
1300         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1301           UsrOI = PtrOI;
1302           Follow = true;
1303           return true;
1304         }
1305 
1306         // Check if the PHI operand is not dependent on the PHI itself.
1307         // TODO: This is not great as we look at the pointer type. However, it
1308         // is unclear where the Offset size comes from with typeless pointers.
1309         APInt Offset(
1310             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1311             0);
1312         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1313                                     DL, Offset, /* AllowNonInbounds */ true)) {
1314           if (Offset != PtrOI.Offset) {
1315             LLVM_DEBUG(dbgs()
1316                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1317                        << *CurPtr << " in " << *Usr << "\n");
1318             return false;
1319           }
1320           return HandlePassthroughUser(Usr, PtrOI, Follow);
1321         }
1322 
1323         // TODO: Approximate in case we know the direction of the recurrence.
1324         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1325                           << *CurPtr << " in " << *Usr << "\n");
1326         UsrOI = PtrOI;
1327         UsrOI.Offset = OffsetAndSize::Unknown;
1328         Follow = true;
1329         return true;
1330       }
1331 
1332       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1333         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1334                             AccessKind::AK_READ, OffsetInfoMap[CurPtr].Offset,
1335                             Changed, LoadI->getType());
1336       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1337         if (StoreI->getValueOperand() == CurPtr) {
1338           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1339                             << *StoreI << "\n");
1340           return false;
1341         }
1342         bool UsedAssumedInformation = false;
1343         Optional<Value *> Content =
1344             A.getAssumedSimplified(*StoreI->getValueOperand(), *this,
1345                                    UsedAssumedInformation, AA::Interprocedural);
1346         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1347                             OffsetInfoMap[CurPtr].Offset, Changed,
1348                             StoreI->getValueOperand()->getType());
1349       }
1350       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1351         if (CB->isLifetimeStartOrEnd())
1352           return true;
1353         if (TLI && isFreeCall(CB, TLI))
1354           return true;
1355         if (CB->isArgOperand(&U)) {
1356           unsigned ArgNo = CB->getArgOperandNo(&U);
1357           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1358               *this, IRPosition::callsite_argument(*CB, ArgNo),
1359               DepClassTy::REQUIRED);
1360           Changed = translateAndAddState(A, CSArgPI,
1361                                          OffsetInfoMap[CurPtr].Offset, *CB) |
1362                     Changed;
1363           return true;
1364         }
1365         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1366                           << "\n");
1367         // TODO: Allow some call uses
1368         return false;
1369       }
1370 
1371       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1372       return false;
1373     };
1374     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
1375       if (OffsetInfoMap.count(NewU))
1376         return OffsetInfoMap[NewU] == OffsetInfoMap[OldU];
1377       OffsetInfoMap[NewU] = OffsetInfoMap[OldU];
1378       return true;
1379     };
1380     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1381                            /* CheckBBLivenessOnly */ true, DepClassTy::OPTIONAL,
1382                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
1383       return indicatePessimisticFixpoint();
1384 
1385     LLVM_DEBUG({
1386       dbgs() << "Accesses by bin after update:\n";
1387       for (auto &It : AccessBins) {
1388         dbgs() << "[" << It.first.getOffset() << "-"
1389                << It.first.getOffset() + It.first.getSize()
1390                << "] : " << It.getSecond()->size() << "\n";
1391         for (auto &Acc : *It.getSecond()) {
1392           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1393                  << "\n";
1394           if (Acc.getLocalInst() != Acc.getRemoteInst())
1395             dbgs() << "     -->                         "
1396                    << *Acc.getRemoteInst() << "\n";
1397           if (!Acc.isWrittenValueYetUndetermined()) {
1398             if (Acc.getWrittenValue())
1399               dbgs() << "       - c: " << *Acc.getWrittenValue() << "\n";
1400             else
1401               dbgs() << "       - c: <unknown>\n";
1402           }
1403         }
1404       }
1405     });
1406 
1407     return Changed;
1408   }
1409 
1410   /// See AbstractAttribute::trackStatistics()
1411   void trackStatistics() const override {
1412     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1413   }
1414 };
1415 
1416 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1417   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1418       : AAPointerInfoImpl(IRP, A) {}
1419 
1420   /// See AbstractAttribute::updateImpl(...).
1421   ChangeStatus updateImpl(Attributor &A) override {
1422     return indicatePessimisticFixpoint();
1423   }
1424 
1425   /// See AbstractAttribute::trackStatistics()
1426   void trackStatistics() const override {
1427     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1428   }
1429 };
1430 
1431 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1432   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1433       : AAPointerInfoFloating(IRP, A) {}
1434 
1435   /// See AbstractAttribute::initialize(...).
1436   void initialize(Attributor &A) override {
1437     AAPointerInfoFloating::initialize(A);
1438     if (getAnchorScope()->isDeclaration())
1439       indicatePessimisticFixpoint();
1440   }
1441 
1442   /// See AbstractAttribute::trackStatistics()
1443   void trackStatistics() const override {
1444     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1445   }
1446 };
1447 
1448 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1449   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1450       : AAPointerInfoFloating(IRP, A) {}
1451 
1452   /// See AbstractAttribute::updateImpl(...).
1453   ChangeStatus updateImpl(Attributor &A) override {
1454     using namespace AA::PointerInfo;
    // We handle memory intrinsics explicitly, at least for the first
    // (= destination) and second (= source) arguments, as we know how they
    // are accessed.
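    // Illustratively, a `llvm.memcpy(%dst, %src, i64 32, ...)` call is
    // recorded as a 32-byte write at offset 0 for %dst and a 32-byte read at
    // offset 0 for %src.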
1458     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1459       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1460       int64_t LengthVal = OffsetAndSize::Unknown;
1461       if (Length)
1462         LengthVal = Length->getSExtValue();
1463       Value &Ptr = getAssociatedValue();
1464       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1465       ChangeStatus Changed = ChangeStatus::UNCHANGED;
1466       if (ArgNo == 0) {
1467         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1468                      nullptr, LengthVal);
1469       } else if (ArgNo == 1) {
1470         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1471                      nullptr, LengthVal);
1472       } else {
1473         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1474                           << *MI << "\n");
1475         return indicatePessimisticFixpoint();
1476       }
1477       return Changed;
1478     }
1479 
1480     // TODO: Once we have call site specific value information we can provide
1481     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1483     //       redirecting requests to the callee argument.
1484     Argument *Arg = getAssociatedArgument();
1485     if (!Arg)
1486       return indicatePessimisticFixpoint();
1487     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1488     auto &ArgAA =
1489         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1490     return translateAndAddState(A, ArgAA, 0, *cast<CallBase>(getCtxI()),
1491                                 /* FromCallee */ true);
1492   }
1493 
1494   /// See AbstractAttribute::trackStatistics()
1495   void trackStatistics() const override {
1496     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1497   }
1498 };
1499 
1500 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1501   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1502       : AAPointerInfoFloating(IRP, A) {}
1503 
1504   /// See AbstractAttribute::trackStatistics()
1505   void trackStatistics() const override {
1506     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1507   }
1508 };
1509 } // namespace
1510 
1511 /// -----------------------NoUnwind Function Attribute--------------------------
1512 
1513 namespace {
1514 struct AANoUnwindImpl : AANoUnwind {
1515   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1516 
1517   const std::string getAsStr() const override {
1518     return getAssumed() ? "nounwind" : "may-unwind";
1519   }
1520 
1521   /// See AbstractAttribute::updateImpl(...).
1522   ChangeStatus updateImpl(Attributor &A) override {
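    // The opcodes we inspect: call-like instructions and the
    // exception-handling terminators through which an unwind can be raised or
    // propagated.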
1523     auto Opcodes = {
1524         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1525         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1526         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1527 
1528     auto CheckForNoUnwind = [&](Instruction &I) {
1529       if (!I.mayThrow())
1530         return true;
1531 
1532       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1533         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1534             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1535         return NoUnwindAA.isAssumedNoUnwind();
1536       }
1537       return false;
1538     };
1539 
1540     bool UsedAssumedInformation = false;
1541     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1542                                    UsedAssumedInformation))
1543       return indicatePessimisticFixpoint();
1544 
1545     return ChangeStatus::UNCHANGED;
1546   }
1547 };
1548 
1549 struct AANoUnwindFunction final : public AANoUnwindImpl {
1550   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1551       : AANoUnwindImpl(IRP, A) {}
1552 
1553   /// See AbstractAttribute::trackStatistics()
1554   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1555 };
1556 
/// NoUnwind attribute deduction for a call site.
1558 struct AANoUnwindCallSite final : AANoUnwindImpl {
1559   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1560       : AANoUnwindImpl(IRP, A) {}
1561 
1562   /// See AbstractAttribute::initialize(...).
1563   void initialize(Attributor &A) override {
1564     AANoUnwindImpl::initialize(A);
1565     Function *F = getAssociatedFunction();
1566     if (!F || F->isDeclaration())
1567       indicatePessimisticFixpoint();
1568   }
1569 
1570   /// See AbstractAttribute::updateImpl(...).
1571   ChangeStatus updateImpl(Attributor &A) override {
1572     // TODO: Once we have call site specific value information we can provide
1573     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1576     Function *F = getAssociatedFunction();
1577     const IRPosition &FnPos = IRPosition::function(*F);
1578     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1579     return clampStateAndIndicateChange(getState(), FnAA.getState());
1580   }
1581 
1582   /// See AbstractAttribute::trackStatistics()
1583   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1584 };
1585 } // namespace
1586 
1587 /// --------------------- Function Return Values -------------------------------
1588 
1589 namespace {
1590 /// "Attribute" that collects all potential returned values and the return
1591 /// instructions that they arise from.
1592 ///
1593 /// If there is a unique returned value R, the manifest method will:
1594 ///   - mark R with the "returned" attribute, if R is an argument.
1595 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1596 
1597   /// Mapping of values potentially returned by the associated function to the
1598   /// return instructions that might return them.
1599   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1600 
1601   /// State flags
1602   ///
1603   ///{
1604   bool IsFixed = false;
1605   bool IsValidState = true;
1606   ///}
1607 
1608 public:
1609   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1610       : AAReturnedValues(IRP, A) {}
1611 
1612   /// See AbstractAttribute::initialize(...).
1613   void initialize(Attributor &A) override {
1614     // Reset the state.
1615     IsFixed = false;
1616     IsValidState = true;
1617     ReturnedValues.clear();
1618 
1619     Function *F = getAssociatedFunction();
1620     if (!F || F->isDeclaration()) {
1621       indicatePessimisticFixpoint();
1622       return;
1623     }
1624     assert(!F->getReturnType()->isVoidTy() &&
1625            "Did not expect a void return type!");
1626 
1627     // The map from instruction opcodes to those instructions in the function.
1628     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1629 
1630     // Look through all arguments, if one is marked as returned we are done.
1631     for (Argument &Arg : F->args()) {
1632       if (Arg.hasReturnedAttr()) {
1633         auto &ReturnInstSet = ReturnedValues[&Arg];
1634         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1635           for (Instruction *RI : *Insts)
1636             ReturnInstSet.insert(cast<ReturnInst>(RI));
1637 
1638         indicateOptimisticFixpoint();
1639         return;
1640       }
1641     }
1642 
1643     if (!A.isFunctionIPOAmendable(*F))
1644       indicatePessimisticFixpoint();
1645   }
1646 
1647   /// See AbstractAttribute::manifest(...).
1648   ChangeStatus manifest(Attributor &A) override;
1649 
1650   /// See AbstractAttribute::getState(...).
1651   AbstractState &getState() override { return *this; }
1652 
1653   /// See AbstractAttribute::getState(...).
1654   const AbstractState &getState() const override { return *this; }
1655 
1656   /// See AbstractAttribute::updateImpl(Attributor &A).
1657   ChangeStatus updateImpl(Attributor &A) override;
1658 
1659   llvm::iterator_range<iterator> returned_values() override {
1660     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1661   }
1662 
1663   llvm::iterator_range<const_iterator> returned_values() const override {
1664     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1665   }
1666 
1667   /// Return the number of potential return values, -1 if unknown.
1668   size_t getNumReturnValues() const override {
1669     return isValidState() ? ReturnedValues.size() : -1;
1670   }
1671 
1672   /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return nullptr. If it is not clear yet, return
  /// None.
1675   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1676 
1677   /// See AbstractState::checkForAllReturnedValues(...).
1678   bool checkForAllReturnedValuesAndReturnInsts(
1679       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1680       const override;
1681 
1682   /// Pretty print the attribute similar to the IR representation.
1683   const std::string getAsStr() const override;
1684 
1685   /// See AbstractState::isAtFixpoint().
1686   bool isAtFixpoint() const override { return IsFixed; }
1687 
1688   /// See AbstractState::isValidState().
1689   bool isValidState() const override { return IsValidState; }
1690 
1691   /// See AbstractState::indicateOptimisticFixpoint(...).
1692   ChangeStatus indicateOptimisticFixpoint() override {
1693     IsFixed = true;
1694     return ChangeStatus::UNCHANGED;
1695   }
1696 
1697   ChangeStatus indicatePessimisticFixpoint() override {
1698     IsFixed = true;
1699     IsValidState = false;
1700     return ChangeStatus::CHANGED;
1701   }
1702 };
1703 
1704 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1705   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1706 
1707   // Bookkeeping.
1708   assert(isValidState());
1709   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1710                   "Number of function with known return values");
1711 
1712   // Check if we have an assumed unique return value that we could manifest.
1713   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1714 
1715   if (!UniqueRV || !UniqueRV.getValue())
1716     return Changed;
1717 
1718   // Bookkeeping.
1719   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1720                   "Number of function with unique return");
1721   // If the assumed unique return value is an argument, annotate it.
1722   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1723     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1724             getAssociatedFunction()->getReturnType())) {
1725       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1726       Changed = IRAttribute::manifest(A);
1727     }
1728   }
1729   return Changed;
1730 }
1731 
1732 const std::string AAReturnedValuesImpl::getAsStr() const {
1733   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1734          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1735 }
1736 
1737 Optional<Value *>
1738 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1739   // If checkForAllReturnedValues provides a unique value, ignoring potential
1740   // undef values that can also be present, it is assumed to be the actual
1741   // return value and forwarded to the caller of this method. If there are
1742   // multiple, a nullptr is returned indicating there cannot be a unique
1743   // returned value.
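  // The combination forms a flat lattice: None while no value has been seen,
  // the concrete value while it is unique (undef is ignored), and nullptr
  // once multiple different values are possible.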
1744   Optional<Value *> UniqueRV;
1745   Type *Ty = getAssociatedFunction()->getReturnType();
1746 
1747   auto Pred = [&](Value &RV) -> bool {
1748     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1749     return UniqueRV != Optional<Value *>(nullptr);
1750   };
1751 
1752   if (!A.checkForAllReturnedValues(Pred, *this))
1753     UniqueRV = nullptr;
1754 
1755   return UniqueRV;
1756 }
1757 
1758 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1759     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1760     const {
1761   if (!isValidState())
1762     return false;
1763 
1764   // Check all returned values but ignore call sites as long as we have not
1765   // encountered an overdefined one during an update.
1766   for (auto &It : ReturnedValues) {
1767     Value *RV = It.first;
1768     if (!Pred(*RV, It.second))
1769       return false;
1770   }
1771 
1772   return true;
1773 }
1774 
1775 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1776   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1777 
1778   SmallVector<AA::ValueAndContext> Values;
1779   bool UsedAssumedInformation = false;
1780   auto ReturnInstCB = [&](Instruction &I) {
1781     ReturnInst &Ret = cast<ReturnInst>(I);
1782     Values.clear();
1783     if (!A.getAssumedSimplifiedValues(IRPosition::value(*Ret.getReturnValue()),
1784                                       *this, Values, AA::Intraprocedural,
1785                                       UsedAssumedInformation))
1786       Values.push_back({*Ret.getReturnValue(), Ret});
1787 
1788     for (auto &VAC : Values) {
1789       assert(AA::isValidInScope(*VAC.getValue(), Ret.getFunction()) &&
1790              "Assumed returned value should be valid in function scope!");
1791       if (ReturnedValues[VAC.getValue()].insert(&Ret))
1792         Changed = ChangeStatus::CHANGED;
1793     }
1794     return true;
1795   };
1796 
1797   // Discover returned values from all live returned instructions in the
1798   // associated function.
1799   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1800                                  UsedAssumedInformation))
1801     return indicatePessimisticFixpoint();
1802   return Changed;
1803 }
1804 
1805 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1806   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1807       : AAReturnedValuesImpl(IRP, A) {}
1808 
1809   /// See AbstractAttribute::trackStatistics()
1810   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1811 };
1812 
/// Returned values information for a call site.
1814 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1815   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1816       : AAReturnedValuesImpl(IRP, A) {}
1817 
1818   /// See AbstractAttribute::initialize(...).
1819   void initialize(Attributor &A) override {
1820     // TODO: Once we have call site specific value information we can provide
1821     //       call site specific liveness information and then it makes
1822     //       sense to specialize attributes for call sites instead of
1823     //       redirecting requests to the callee.
1824     llvm_unreachable("Abstract attributes for returned values are not "
1825                      "supported for call sites yet!");
1826   }
1827 
1828   /// See AbstractAttribute::updateImpl(...).
1829   ChangeStatus updateImpl(Attributor &A) override {
1830     return indicatePessimisticFixpoint();
1831   }
1832 
1833   /// See AbstractAttribute::trackStatistics()
1834   void trackStatistics() const override {}
1835 };
1836 } // namespace
1837 
1838 /// ------------------------ NoSync Function Attribute -------------------------
1839 
1840 bool AANoSync::isNonRelaxedAtomic(const Instruction *I) {
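  // An atomic operation is "non-relaxed" if its ordering is stronger than
  // monotonic; e.g., an acquire load is non-relaxed, while a monotonic
  // (relaxed) load is not.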
1841   if (!I->isAtomic())
1842     return false;
1843 
1844   if (auto *FI = dyn_cast<FenceInst>(I))
1845     // All legal orderings for fence are stronger than monotonic.
1846     return FI->getSyncScopeID() != SyncScope::SingleThread;
1847   if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1848     // Unordered is not a legal ordering for cmpxchg.
1849     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1850             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1851   }
1852 
1853   AtomicOrdering Ordering;
1854   switch (I->getOpcode()) {
1855   case Instruction::AtomicRMW:
1856     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1857     break;
1858   case Instruction::Store:
1859     Ordering = cast<StoreInst>(I)->getOrdering();
1860     break;
1861   case Instruction::Load:
1862     Ordering = cast<LoadInst>(I)->getOrdering();
1863     break;
1864   default:
1865     llvm_unreachable(
1866         "New atomic operations need to be known in the attributor.");
1867   }
1868 
1869   return (Ordering != AtomicOrdering::Unordered &&
1870           Ordering != AtomicOrdering::Monotonic);
1871 }
1872 
1873 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1874 /// which would be nosync except that they have a volatile flag.  All other
1875 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1876 bool AANoSync::isNoSyncIntrinsic(const Instruction *I) {
1877   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1878     return !MI->isVolatile();
1879   return false;
1880 }
1881 
1882 namespace {
1883 struct AANoSyncImpl : AANoSync {
1884   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1885 
1886   const std::string getAsStr() const override {
1887     return getAssumed() ? "nosync" : "may-sync";
1888   }
1889 
1890   /// See AbstractAttribute::updateImpl(...).
1891   ChangeStatus updateImpl(Attributor &A) override;
1892 };
1893 
1894 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1895 
1896   auto CheckRWInstForNoSync = [&](Instruction &I) {
1897     return AA::isNoSyncInst(A, I, *this);
1898   };
1899 
1900   auto CheckForNoSync = [&](Instruction &I) {
1901     // At this point we handled all read/write effects and they are all
1902     // nosync, so they can be skipped.
1903     if (I.mayReadOrWriteMemory())
1904       return true;
1905 
    // Non-convergent and readnone imply nosync.
1907     return !cast<CallBase>(I).isConvergent();
1908   };
1909 
1910   bool UsedAssumedInformation = false;
1911   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1912                                           UsedAssumedInformation) ||
1913       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1914                                          UsedAssumedInformation))
1915     return indicatePessimisticFixpoint();
1916 
1917   return ChangeStatus::UNCHANGED;
1918 }
1919 
1920 struct AANoSyncFunction final : public AANoSyncImpl {
1921   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1922       : AANoSyncImpl(IRP, A) {}
1923 
1924   /// See AbstractAttribute::trackStatistics()
1925   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1926 };
1927 
/// NoSync attribute deduction for a call site.
1929 struct AANoSyncCallSite final : AANoSyncImpl {
1930   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1931       : AANoSyncImpl(IRP, A) {}
1932 
1933   /// See AbstractAttribute::initialize(...).
1934   void initialize(Attributor &A) override {
1935     AANoSyncImpl::initialize(A);
1936     Function *F = getAssociatedFunction();
1937     if (!F || F->isDeclaration())
1938       indicatePessimisticFixpoint();
1939   }
1940 
1941   /// See AbstractAttribute::updateImpl(...).
1942   ChangeStatus updateImpl(Attributor &A) override {
1943     // TODO: Once we have call site specific value information we can provide
1944     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1947     Function *F = getAssociatedFunction();
1948     const IRPosition &FnPos = IRPosition::function(*F);
1949     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1950     return clampStateAndIndicateChange(getState(), FnAA.getState());
1951   }
1952 
1953   /// See AbstractAttribute::trackStatistics()
1954   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1955 };
1956 } // namespace
1957 
1958 /// ------------------------ No-Free Attributes ----------------------------
1959 
1960 namespace {
1961 struct AANoFreeImpl : public AANoFree {
1962   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1963 
1964   /// See AbstractAttribute::updateImpl(...).
1965   ChangeStatus updateImpl(Attributor &A) override {
1966     auto CheckForNoFree = [&](Instruction &I) {
1967       const auto &CB = cast<CallBase>(I);
1968       if (CB.hasFnAttr(Attribute::NoFree))
1969         return true;
1970 
1971       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1972           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1973       return NoFreeAA.isAssumedNoFree();
1974     };
1975 
1976     bool UsedAssumedInformation = false;
1977     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1978                                            UsedAssumedInformation))
1979       return indicatePessimisticFixpoint();
1980     return ChangeStatus::UNCHANGED;
1981   }
1982 
1983   /// See AbstractAttribute::getAsStr().
1984   const std::string getAsStr() const override {
1985     return getAssumed() ? "nofree" : "may-free";
1986   }
1987 };
1988 
1989 struct AANoFreeFunction final : public AANoFreeImpl {
1990   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1991       : AANoFreeImpl(IRP, A) {}
1992 
1993   /// See AbstractAttribute::trackStatistics()
1994   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1995 };
1996 
/// NoFree attribute deduction for a call site.
1998 struct AANoFreeCallSite final : AANoFreeImpl {
1999   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
2000       : AANoFreeImpl(IRP, A) {}
2001 
2002   /// See AbstractAttribute::initialize(...).
2003   void initialize(Attributor &A) override {
2004     AANoFreeImpl::initialize(A);
2005     Function *F = getAssociatedFunction();
2006     if (!F || F->isDeclaration())
2007       indicatePessimisticFixpoint();
2008   }
2009 
2010   /// See AbstractAttribute::updateImpl(...).
2011   ChangeStatus updateImpl(Attributor &A) override {
2012     // TODO: Once we have call site specific value information we can provide
2013     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2016     Function *F = getAssociatedFunction();
2017     const IRPosition &FnPos = IRPosition::function(*F);
2018     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
2019     return clampStateAndIndicateChange(getState(), FnAA.getState());
2020   }
2021 
2022   /// See AbstractAttribute::trackStatistics()
2023   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
2024 };
2025 
2026 /// NoFree attribute for floating values.
2027 struct AANoFreeFloating : AANoFreeImpl {
2028   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
2029       : AANoFreeImpl(IRP, A) {}
2030 
2031   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
2033 
  /// See AbstractAttribute::updateImpl(...).
2035   ChangeStatus updateImpl(Attributor &A) override {
2036     const IRPosition &IRP = getIRPosition();
2037 
2038     const auto &NoFreeAA = A.getAAFor<AANoFree>(
2039         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
2040     if (NoFreeAA.isAssumedNoFree())
2041       return ChangeStatus::UNCHANGED;
2042 
2043     Value &AssociatedValue = getIRPosition().getAssociatedValue();
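    // Walk all (transitive) uses of the associated value. The value can only
    // be freed through a call argument position that is not known nofree;
    // pointer-propagating instructions (GEPs, casts, PHIs, selects) are
    // followed, while loads, stores, and returns cannot free the value.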
2044     auto Pred = [&](const Use &U, bool &Follow) -> bool {
2045       Instruction *UserI = cast<Instruction>(U.getUser());
2046       if (auto *CB = dyn_cast<CallBase>(UserI)) {
2047         if (CB->isBundleOperand(&U))
2048           return false;
2049         if (!CB->isArgOperand(&U))
2050           return true;
2051         unsigned ArgNo = CB->getArgOperandNo(&U);
2052 
2053         const auto &NoFreeArg = A.getAAFor<AANoFree>(
2054             *this, IRPosition::callsite_argument(*CB, ArgNo),
2055             DepClassTy::REQUIRED);
2056         return NoFreeArg.isAssumedNoFree();
2057       }
2058 
2059       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2060           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2061         Follow = true;
2062         return true;
2063       }
2064       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2065           isa<ReturnInst>(UserI))
2066         return true;
2067 
2068       // Unknown user.
2069       return false;
2070     };
2071     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2072       return indicatePessimisticFixpoint();
2073 
2074     return ChangeStatus::UNCHANGED;
2075   }
2076 };
2077 
/// NoFree attribute for a function argument.
2079 struct AANoFreeArgument final : AANoFreeFloating {
2080   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2081       : AANoFreeFloating(IRP, A) {}
2082 
2083   /// See AbstractAttribute::trackStatistics()
2084   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2085 };
2086 
2087 /// NoFree attribute for call site arguments.
2088 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2089   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2090       : AANoFreeFloating(IRP, A) {}
2091 
2092   /// See AbstractAttribute::updateImpl(...).
2093   ChangeStatus updateImpl(Attributor &A) override {
2094     // TODO: Once we have call site specific value information we can provide
2095     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2097     //       redirecting requests to the callee argument.
2098     Argument *Arg = getAssociatedArgument();
2099     if (!Arg)
2100       return indicatePessimisticFixpoint();
2101     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2102     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2103     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2104   }
2105 
2106   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
2108 };
2109 
2110 /// NoFree attribute for function return value.
2111 struct AANoFreeReturned final : AANoFreeFloating {
2112   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2113       : AANoFreeFloating(IRP, A) {
2114     llvm_unreachable("NoFree is not applicable to function returns!");
2115   }
2116 
2117   /// See AbstractAttribute::initialize(...).
2118   void initialize(Attributor &A) override {
2119     llvm_unreachable("NoFree is not applicable to function returns!");
2120   }
2121 
2122   /// See AbstractAttribute::updateImpl(...).
2123   ChangeStatus updateImpl(Attributor &A) override {
2124     llvm_unreachable("NoFree is not applicable to function returns!");
2125   }
2126 
2127   /// See AbstractAttribute::trackStatistics()
2128   void trackStatistics() const override {}
2129 };
2130 
2131 /// NoFree attribute deduction for a call site return value.
2132 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2133   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2134       : AANoFreeFloating(IRP, A) {}
2135 
2136   ChangeStatus manifest(Attributor &A) override {
2137     return ChangeStatus::UNCHANGED;
2138   }
2139   /// See AbstractAttribute::trackStatistics()
2140   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2141 };
2142 } // namespace
2143 
2144 /// ------------------------ NonNull Argument Attribute ------------------------
2145 namespace {
2146 static int64_t getKnownNonNullAndDerefBytesForUse(
2147     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2148     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2149   TrackUse = false;
2150 
2151   const Value *UseV = U->get();
2152   if (!UseV->getType()->isPointerTy())
2153     return 0;
2154 
2155   // We need to follow common pointer manipulation uses to the accesses they
2156   // feed into. We can try to be smart to avoid looking through things we do not
2157   // like for now, e.g., non-inbounds GEPs.
2158   if (isa<CastInst>(I)) {
2159     TrackUse = true;
2160     return 0;
2161   }
2162 
2163   if (isa<GetElementPtrInst>(I)) {
2164     TrackUse = true;
2165     return 0;
2166   }
2167 
2168   Type *PtrTy = UseV->getType();
2169   const Function *F = I->getFunction();
2170   bool NullPointerIsDefined =
2171       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2172   const DataLayout &DL = A.getInfoCache().getDL();
2173   if (const auto *CB = dyn_cast<CallBase>(I)) {
2174     if (CB->isBundleOperand(U)) {
2175       if (RetainedKnowledge RK = getKnowledgeFromUse(
2176               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2177         IsNonNull |=
2178             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2179         return RK.ArgValue;
2180       }
2181       return 0;
2182     }
2183 
2184     if (CB->isCallee(U)) {
2185       IsNonNull |= !NullPointerIsDefined;
2186       return 0;
2187     }
2188 
2189     unsigned ArgNo = CB->getArgOperandNo(U);
2190     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2191     // As long as we only use known information there is no need to track
2192     // dependences here.
2193     auto &DerefAA =
2194         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2195     IsNonNull |= DerefAA.isKnownNonNull();
2196     return DerefAA.getKnownDereferenceableBytes();
2197   }
2198 
2199   Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
2200   if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
2201     return 0;
2202 
2203   int64_t Offset;
2204   const Value *Base =
2205       getMinimalBaseOfPointer(A, QueryingAA, Loc->Ptr, Offset, DL);
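  // If the access is based on the associated value plus a constant offset,
  // the access itself proves `Offset + AccessSize` dereferenceable bytes
  // (illustratively, a 4-byte load at offset 4 proves 8 bytes).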
2206   if (Base && Base == &AssociatedValue) {
2207     int64_t DerefBytes = Loc->Size.getValue() + Offset;
2208     IsNonNull |= !NullPointerIsDefined;
2209     return std::max(int64_t(0), DerefBytes);
2210   }
2211 
  // Corner case when the offset is 0.
2213   Base = GetPointerBaseWithConstantOffset(Loc->Ptr, Offset, DL,
2214                                           /*AllowNonInbounds*/ true);
2215   if (Base && Base == &AssociatedValue && Offset == 0) {
2216     int64_t DerefBytes = Loc->Size.getValue();
2217     IsNonNull |= !NullPointerIsDefined;
2218     return std::max(int64_t(0), DerefBytes);
2219   }
2220 
2221   return 0;
2222 }
2223 
2224 struct AANonNullImpl : AANonNull {
2225   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2226       : AANonNull(IRP, A),
2227         NullIsDefined(NullPointerIsDefined(
2228             getAnchorScope(),
2229             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2230 
2231   /// See AbstractAttribute::initialize(...).
2232   void initialize(Attributor &A) override {
2233     Value &V = *getAssociatedValue().stripPointerCasts();
2234     if (!NullIsDefined &&
2235         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2236                 /* IgnoreSubsumingPositions */ false, &A)) {
2237       indicateOptimisticFixpoint();
2238       return;
2239     }
2240 
2241     if (isa<ConstantPointerNull>(V)) {
2242       indicatePessimisticFixpoint();
2243       return;
2244     }
2245 
2246     AANonNull::initialize(A);
2247 
2248     bool CanBeNull, CanBeFreed;
2249     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2250                                          CanBeFreed)) {
2251       if (!CanBeNull) {
2252         indicateOptimisticFixpoint();
2253         return;
2254       }
2255     }
2256 
2257     if (isa<GlobalValue>(V)) {
2258       indicatePessimisticFixpoint();
2259       return;
2260     }
2261 
2262     if (Instruction *CtxI = getCtxI())
2263       followUsesInMBEC(*this, A, getState(), *CtxI);
2264   }
2265 
2266   /// See followUsesInMBEC
2267   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2268                        AANonNull::StateType &State) {
2269     bool IsNonNull = false;
2270     bool TrackUse = false;
2271     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2272                                        IsNonNull, TrackUse);
2273     State.setKnown(IsNonNull);
2274     return TrackUse;
2275   }
2276 
2277   /// See AbstractAttribute::getAsStr().
2278   const std::string getAsStr() const override {
2279     return getAssumed() ? "nonnull" : "may-null";
2280   }
2281 
2282   /// Flag to determine if the underlying value can be null and still allow
2283   /// valid accesses.
2284   const bool NullIsDefined;
2285 };
2286 
2287 /// NonNull attribute for a floating value.
2288 struct AANonNullFloating : public AANonNullImpl {
2289   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2290       : AANonNullImpl(IRP, A) {}
2291 
2292   /// See AbstractAttribute::updateImpl(...).
2293   ChangeStatus updateImpl(Attributor &A) override {
2294     const DataLayout &DL = A.getDataLayout();
2295 
2296     bool Stripped;
2297     bool UsedAssumedInformation = false;
2298     SmallVector<AA::ValueAndContext> Values;
2299     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
2300                                       AA::AnyScope, UsedAssumedInformation)) {
2301       Values.push_back({getAssociatedValue(), getCtxI()});
2302       Stripped = false;
2303     } else {
2304       Stripped = Values.size() != 1 ||
2305                  Values.front().getValue() != &getAssociatedValue();
2306     }
2307 
2308     DominatorTree *DT = nullptr;
2309     AssumptionCache *AC = nullptr;
2310     InformationCache &InfoCache = A.getInfoCache();
2311     if (const Function *Fn = getAnchorScope()) {
2312       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2313       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2314     }
2315 
2316     AANonNull::StateType T;
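    // Visit each simplified value. If we did not manage to look through the
    // value, fall back to isKnownNonZero; otherwise accumulate the AANonNull
    // state of the underlying value into T.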
2317     auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
2318       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2319                                              DepClassTy::REQUIRED);
2320       if (!Stripped && this == &AA) {
2321         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2322           T.indicatePessimisticFixpoint();
2323       } else {
2324         // Use abstract attribute information.
2325         const AANonNull::StateType &NS = AA.getState();
2326         T ^= NS;
2327       }
2328       return T.isValidState();
2329     };
2330 
2331     for (const auto &VAC : Values)
2332       if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
2333         return indicatePessimisticFixpoint();
2334 
2335     return clampStateAndIndicateChange(getState(), T);
2336   }
2337 
2338   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
2340 };
2341 
2342 /// NonNull attribute for function return value.
2343 struct AANonNullReturned final
2344     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2345   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2346       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2347 
2348   /// See AbstractAttribute::getAsStr().
2349   const std::string getAsStr() const override {
2350     return getAssumed() ? "nonnull" : "may-null";
2351   }
2352 
2353   /// See AbstractAttribute::trackStatistics()
2354   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2355 };
2356 
2357 /// NonNull attribute for function argument.
2358 struct AANonNullArgument final
2359     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2360   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2361       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2362 
2363   /// See AbstractAttribute::trackStatistics()
2364   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2365 };
2366 
2367 struct AANonNullCallSiteArgument final : AANonNullFloating {
2368   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2369       : AANonNullFloating(IRP, A) {}
2370 
2371   /// See AbstractAttribute::trackStatistics()
2372   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2373 };
2374 
2375 /// NonNull attribute for a call site return position.
2376 struct AANonNullCallSiteReturned final
2377     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2378   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2379       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2380 
2381   /// See AbstractAttribute::trackStatistics()
2382   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2383 };
2384 } // namespace
2385 
2386 /// ------------------------ No-Recurse Attributes ----------------------------
2387 
2388 namespace {
2389 struct AANoRecurseImpl : public AANoRecurse {
2390   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2391 
2392   /// See AbstractAttribute::getAsStr()
2393   const std::string getAsStr() const override {
2394     return getAssumed() ? "norecurse" : "may-recurse";
2395   }
2396 };
2397 
2398 struct AANoRecurseFunction final : AANoRecurseImpl {
2399   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2400       : AANoRecurseImpl(IRP, A) {}
2401 
2402   /// See AbstractAttribute::updateImpl(...).
2403   ChangeStatus updateImpl(Attributor &A) override {
2404 
2405     // If all live call sites are known to be no-recurse, we are as well.
2406     auto CallSitePred = [&](AbstractCallSite ACS) {
2407       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2408           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2409           DepClassTy::NONE);
2410       return NoRecurseAA.isKnownNoRecurse();
2411     };
2412     bool UsedAssumedInformation = false;
2413     if (A.checkForAllCallSites(CallSitePred, *this, true,
2414                                UsedAssumedInformation)) {
2415       // If we know all call sites and all are known no-recurse, we are done.
2416       // If all known call sites, which might not be all that exist, are known
2417       // to be no-recurse, we are not done but we can continue to assume
2418       // no-recurse. If one of the call sites we have not visited will become
2419       // live, another update is triggered.
2420       if (!UsedAssumedInformation)
2421         indicateOptimisticFixpoint();
2422       return ChangeStatus::UNCHANGED;
2423     }
2424 
2425     const AAFunctionReachability &EdgeReachability =
2426         A.getAAFor<AAFunctionReachability>(*this, getIRPosition(),
2427                                            DepClassTy::REQUIRED);
2428     if (EdgeReachability.canReach(A, *getAnchorScope()))
2429       return indicatePessimisticFixpoint();
2430     return ChangeStatus::UNCHANGED;
2431   }
2432 
2433   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2434 };
2435 
/// NoRecurse attribute deduction for a call site.
2437 struct AANoRecurseCallSite final : AANoRecurseImpl {
2438   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2439       : AANoRecurseImpl(IRP, A) {}
2440 
2441   /// See AbstractAttribute::initialize(...).
2442   void initialize(Attributor &A) override {
2443     AANoRecurseImpl::initialize(A);
2444     Function *F = getAssociatedFunction();
2445     if (!F || F->isDeclaration())
2446       indicatePessimisticFixpoint();
2447   }
2448 
2449   /// See AbstractAttribute::updateImpl(...).
2450   ChangeStatus updateImpl(Attributor &A) override {
2451     // TODO: Once we have call site specific value information we can provide
2452     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2455     Function *F = getAssociatedFunction();
2456     const IRPosition &FnPos = IRPosition::function(*F);
2457     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2458     return clampStateAndIndicateChange(getState(), FnAA.getState());
2459   }
2460 
2461   /// See AbstractAttribute::trackStatistics()
2462   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2463 };
2464 } // namespace
2465 
2466 /// -------------------- Undefined-Behavior Attributes ------------------------
2467 
2468 namespace {
2469 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2470   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2471       : AAUndefinedBehavior(IRP, A) {}
2472 
2473   /// See AbstractAttribute::updateImpl(...).
2475   ChangeStatus updateImpl(Attributor &A) override {
2476     const size_t UBPrevSize = KnownUBInsts.size();
2477     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2478 
2479     auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // The LangRef now states that volatile stores are not UB, so skip them.
2481       if (I.isVolatile() && I.mayWriteToMemory())
2482         return true;
2483 
2484       // Skip instructions that are already saved.
2485       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2486         return true;
2487 
      // If we reach here, we know we have an instruction that accesses memory
      // through a pointer operand, which getPointerOperand() will return.
2491       Value *PtrOp =
2492           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2493       assert(PtrOp &&
2494              "Expected pointer operand of memory accessing instruction");
2495 
2496       // Either we stopped and the appropriate action was taken,
2497       // or we got back a simplified value to continue.
2498       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2499       if (!SimplifiedPtrOp || !SimplifiedPtrOp.getValue())
2500         return true;
2501       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2502 
2503       // A memory access through a pointer is considered UB
      // only if the pointer is a constant null value.
2505       // TODO: Expand it to not only check constant values.
2506       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2507         AssumedNoUBInsts.insert(&I);
2508         return true;
2509       }
2510       const Type *PtrTy = PtrOpVal->getType();
2511 
2512       // Because we only consider instructions inside functions,
2513       // assume that a parent function exists.
2514       const Function *F = I.getFunction();
2515 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
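      // Illustratively, `store i32 0, ptr null` is known UB here unless the
      // null pointer is defined for the address space (e.g., via the
      // `null_pointer_is_valid` function attribute).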
2518       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2519         AssumedNoUBInsts.insert(&I);
2520       else
2521         KnownUBInsts.insert(&I);
2522       return true;
2523     };
2524 
2525     auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.
2528 
2529       // Skip instructions that are already saved.
2530       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2531         return true;
2532 
2533       // We know we have a branch instruction.
2534       auto *BrInst = cast<BranchInst>(&I);
2535 
2536       // Unconditional branches are never considered UB.
2537       if (BrInst->isUnconditional())
2538         return true;
2539 
2540       // Either we stopped and the appropriate action was taken,
2541       // or we got back a simplified value to continue.
2542       Optional<Value *> SimplifiedCond =
2543           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2544       if (!SimplifiedCond || !*SimplifiedCond)
2545         return true;
2546       AssumedNoUBInsts.insert(&I);
2547       return true;
2548     };
2549 
2550     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB.
2552 
2553       // Skip instructions that are already saved.
2554       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2555         return true;
2556 
      // Check for nonnull and noundef argument attribute violations at each
      // call site.
2559       CallBase &CB = cast<CallBase>(I);
2560       Function *Callee = CB.getCalledFunction();
2561       if (!Callee)
2562         return true;
2563       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the
        // nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this call site is considered UB.
2569         if (idx >= Callee->arg_size())
2570           break;
2571         Value *ArgVal = CB.getArgOperand(idx);
2572         if (!ArgVal)
2573           continue;
        // Here, we handle three cases.
        //   (1) Not having a value means the argument is dead (we can replace
        //       it with undef).
        //   (2) Simplified to undef: the argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where the position is known to
        //       be nonnull: the argument is poison and violates the noundef
        //       attribute.
2580         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2581         auto &NoUndefAA =
2582             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2583         if (!NoUndefAA.isKnownNoUndef())
2584           continue;
2585         bool UsedAssumedInformation = false;
2586         Optional<Value *> SimplifiedVal =
2587             A.getAssumedSimplified(IRPosition::value(*ArgVal), *this,
2588                                    UsedAssumedInformation, AA::Interprocedural);
2589         if (UsedAssumedInformation)
2590           continue;
2591         if (SimplifiedVal && !SimplifiedVal.getValue())
2592           return true;
2593         if (!SimplifiedVal || isa<UndefValue>(*SimplifiedVal.getValue())) {
2594           KnownUBInsts.insert(&I);
2595           continue;
2596         }
2597         if (!ArgVal->getType()->isPointerTy() ||
2598             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2599           continue;
2600         auto &NonNullAA =
2601             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2602         if (NonNullAA.isKnownNonNull())
2603           KnownUBInsts.insert(&I);
2604       }
2605       return true;
2606     };
2607 
2608     auto InspectReturnInstForUB = [&](Instruction &I) {
2609       auto &RI = cast<ReturnInst>(I);
2610       // Either we stopped and the appropriate action was taken,
2611       // or we got back a simplified return value to continue.
2612       Optional<Value *> SimplifiedRetValue =
2613           stopOnUndefOrAssumed(A, RI.getReturnValue(), &I);
2614       if (!SimplifiedRetValue || !*SimplifiedRetValue)
2615         return true;
2616 
      // Check whether a return instruction always causes UB.
2618       // Note: It is guaranteed that the returned position of the anchor
2619       //       scope has noundef attribute when this is called.
2620       //       We also ensure the return position is not "assumed dead"
2621       //       because the returned value was then potentially simplified to
2622       //       `undef` in AAReturnedValues without removing the `noundef`
2623       //       attribute yet.
2624 
      // When the returned position has the noundef attribute, UB occurs in
      // the following cases.
2627       //   (1) Returned value is known to be undef.
2628       //   (2) The value is known to be a null pointer and the returned
2629       //       position has nonnull attribute (because the returned value is
2630       //       poison).
2631       if (isa<ConstantPointerNull>(*SimplifiedRetValue)) {
2632         auto &NonNullAA = A.getAAFor<AANonNull>(
2633             *this, IRPosition::returned(*getAnchorScope()), DepClassTy::NONE);
2634         if (NonNullAA.isKnownNonNull())
2635           KnownUBInsts.insert(&I);
2636       }
2637 
2638       return true;
2639     };
2640 
2641     bool UsedAssumedInformation = false;
2642     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2643                               {Instruction::Load, Instruction::Store,
2644                                Instruction::AtomicCmpXchg,
2645                                Instruction::AtomicRMW},
2646                               UsedAssumedInformation,
2647                               /* CheckBBLivenessOnly */ true);
2648     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2649                               UsedAssumedInformation,
2650                               /* CheckBBLivenessOnly */ true);
2651     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2652                                       UsedAssumedInformation);
2653 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
2656     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2657       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2658       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2659         auto &RetPosNoUndefAA =
2660             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2661         if (RetPosNoUndefAA.isKnownNoUndef())
2662           A.checkForAllInstructions(InspectReturnInstForUB, *this,
2663                                     {Instruction::Ret}, UsedAssumedInformation,
2664                                     /* CheckBBLivenessOnly */ true);
2665       }
2666     }
2667 
2668     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2669         UBPrevSize != KnownUBInsts.size())
2670       return ChangeStatus::CHANGED;
2671     return ChangeStatus::UNCHANGED;
2672   }
2673 
2674   bool isKnownToCauseUB(Instruction *I) const override {
2675     return KnownUBInsts.count(I);
2676   }
2677 
2678   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words: if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // ensures that it is one of the instructions for which we test UB.
2684 
2685     switch (I->getOpcode()) {
2686     case Instruction::Load:
2687     case Instruction::Store:
2688     case Instruction::AtomicCmpXchg:
2689     case Instruction::AtomicRMW:
2690       return !AssumedNoUBInsts.count(I);
2691     case Instruction::Br: {
2692       auto *BrInst = cast<BranchInst>(I);
2693       if (BrInst->isUnconditional())
2694         return false;
2695       return !AssumedNoUBInsts.count(I);
    }
2697     default:
2698       return false;
2699     }
2700     return false;
2701   }
2702 
2703   ChangeStatus manifest(Attributor &A) override {
2704     if (KnownUBInsts.empty())
2705       return ChangeStatus::UNCHANGED;
2706     for (Instruction *I : KnownUBInsts)
2707       A.changeToUnreachableAfterManifest(I);
2708     return ChangeStatus::CHANGED;
2709   }
2710 
2711   /// See AbstractAttribute::getAsStr()
2712   const std::string getAsStr() const override {
2713     return getAssumed() ? "undefined-behavior" : "no-ub";
2714   }
2715 
2716   /// Note: The correctness of this analysis depends on the fact that the
2717   /// following 2 sets will stop changing after some point.
2718   /// "Change" here means that their size changes.
2719   /// The size of each set is monotonically increasing
2720   /// (we only add items to them) and it is upper bounded by the number of
2721   /// instructions in the processed function (we can never save more
2722   /// elements in either set than this number). Hence, at some point,
2723   /// they will stop increasing.
2724   /// Consequently, at some point, both sets will have stopped
2725   /// changing, effectively making the analysis reach a fixpoint.
2726 
2727   /// Note: These 2 sets are disjoint and an instruction can be considered
2728   /// one of 3 things:
2729   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2730   ///    the KnownUBInsts set.
2731   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2732   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB: every other instruction. AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
2735   ///    hence it assumes it doesn't. We have a set for these instructions
2736   ///    so that we don't reprocess them in every update.
2737   ///    Note however that instructions in this set may cause UB.
2738 
2739 protected:
2740   /// A set of all live instructions _known_ to cause UB.
2741   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2742 
2743 private:
2744   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2745   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2746 
  // Should be called on updates in which we process an instruction \p I that
  // depends on a value \p V. One of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first two cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should use for
  // specific processing.
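  // For example, callers typically pass the pointer operand of a memory
  // access or a branch condition as \p V, together with the inspected
  // instruction \p I.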
2756   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2757                                          Instruction *I) {
2758     bool UsedAssumedInformation = false;
2759     Optional<Value *> SimplifiedV =
2760         A.getAssumedSimplified(IRPosition::value(*V), *this,
2761                                UsedAssumedInformation, AA::Interprocedural);
2762     if (!UsedAssumedInformation) {
2763       // Don't depend on assumed values.
2764       if (!SimplifiedV) {
2765         // If it is known (which we tested above) but it doesn't have a value,
2766         // then we can assume `undef` and hence the instruction is UB.
2767         KnownUBInsts.insert(I);
2768         return llvm::None;
2769       }
2770       if (!*SimplifiedV)
2771         return nullptr;
2772       V = *SimplifiedV;
2773     }
2774     if (isa<UndefValue>(V)) {
2775       KnownUBInsts.insert(I);
2776       return llvm::None;
2777     }
2778     return V;
2779   }
2780 };
2781 
2782 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2783   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2784       : AAUndefinedBehaviorImpl(IRP, A) {}
2785 
2786   /// See AbstractAttribute::trackStatistics()
2787   void trackStatistics() const override {
2788     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2789                "Number of instructions known to have UB");
2790     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2791         KnownUBInsts.size();
2792   }
2793 };
2794 } // namespace
2795 
2796 /// ------------------------ Will-Return Attributes ----------------------------
2797 
2798 namespace {
// Helper function that checks whether a function contains any cycle that is
// not known to be bounded. Loops with a known maximum trip count are
// considered bounded; any other cycle is not.
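// For example, a loop like `for (int i = 0; i < 100; ++i)` has a constant
// maximum trip count, while a `while (cond)` loop with an opaque condition
// generally does not.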
2802 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2803   ScalarEvolution *SE =
2804       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2805   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we
  // conservatively treat every cycle as unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the
  // maximal SCCs. To detect whether there is a cycle, it suffices to find the
  // maximal ones.
2810   if (!SE || !LI) {
2811     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2812       if (SCCI.hasCycle())
2813         return true;
2814     return false;
2815   }
2816 
  // If there is irreducible control flow, the function may contain non-loop
  // cycles.
2818   if (mayContainIrreducibleControl(F, LI))
2819     return true;
2820 
  // Any loop that does not have a known maximum trip count is considered an
  // unbounded cycle.
2822   for (auto *L : LI->getLoopsInPreorder()) {
2823     if (!SE->getSmallConstantMaxTripCount(L))
2824       return true;
2825   }
2826   return false;
2827 }
2828 
2829 struct AAWillReturnImpl : public AAWillReturn {
2830   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2831       : AAWillReturn(IRP, A) {}
2832 
2833   /// See AbstractAttribute::initialize(...).
2834   void initialize(Attributor &A) override {
2835     AAWillReturn::initialize(A);
2836 
2837     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2838       indicateOptimisticFixpoint();
2839       return;
2840     }
2841   }
2842 
2843   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2844   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2845     // Check for `mustprogress` in the scope and the associated function which
2846     // might be different if this is a call site.
2847     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2848         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2849       return false;
2850 
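    // A known readonly fact yields willreturn at the same "known" strength;
    // an assumed readonly suffices unless the caller asked for known facts
    // only.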
2851     bool IsKnown;
2852     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
2853       return IsKnown || !KnownOnly;
2854     return false;
2855   }
2856 
2857   /// See AbstractAttribute::updateImpl(...).
2858   ChangeStatus updateImpl(Attributor &A) override {
2859     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2860       return ChangeStatus::UNCHANGED;
2861 
2862     auto CheckForWillReturn = [&](Instruction &I) {
2863       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2864       const auto &WillReturnAA =
2865           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2866       if (WillReturnAA.isKnownWillReturn())
2867         return true;
2868       if (!WillReturnAA.isAssumedWillReturn())
2869         return false;
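      // If the callee is only assumed (not known) to return, additionally
      // require norecurse: otherwise a recursive function could
      // optimistically justify its own willreturn.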
2870       const auto &NoRecurseAA =
2871           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2872       return NoRecurseAA.isAssumedNoRecurse();
2873     };
2874 
2875     bool UsedAssumedInformation = false;
2876     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2877                                            UsedAssumedInformation))
2878       return indicatePessimisticFixpoint();
2879 
2880     return ChangeStatus::UNCHANGED;
2881   }
2882 
2883   /// See AbstractAttribute::getAsStr()
2884   const std::string getAsStr() const override {
2885     return getAssumed() ? "willreturn" : "may-noreturn";
2886   }
2887 };
2888 
2889 struct AAWillReturnFunction final : AAWillReturnImpl {
2890   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2891       : AAWillReturnImpl(IRP, A) {}
2892 
2893   /// See AbstractAttribute::initialize(...).
2894   void initialize(Attributor &A) override {
2895     AAWillReturnImpl::initialize(A);
2896 
2897     Function *F = getAnchorScope();
2898     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2899       indicatePessimisticFixpoint();
2900   }
2901 
2902   /// See AbstractAttribute::trackStatistics()
2903   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2904 };
2905 
/// WillReturn attribute deduction for a call site.
2907 struct AAWillReturnCallSite final : AAWillReturnImpl {
2908   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2909       : AAWillReturnImpl(IRP, A) {}
2910 
2911   /// See AbstractAttribute::initialize(...).
2912   void initialize(Attributor &A) override {
2913     AAWillReturnImpl::initialize(A);
2914     Function *F = getAssociatedFunction();
2915     if (!F || !A.isFunctionIPOAmendable(*F))
2916       indicatePessimisticFixpoint();
2917   }
2918 
2919   /// See AbstractAttribute::updateImpl(...).
2920   ChangeStatus updateImpl(Attributor &A) override {
2921     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2922       return ChangeStatus::UNCHANGED;
2923 
2924     // TODO: Once we have call site specific value information we can provide
2925     //       call site specific liveness information and then it makes
2926     //       sense to specialize attributes for call sites arguments instead of
2927     //       redirecting requests to the callee argument.
2928     Function *F = getAssociatedFunction();
2929     const IRPosition &FnPos = IRPosition::function(*F);
2930     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2931     return clampStateAndIndicateChange(getState(), FnAA.getState());
2932   }
2933 
2934   /// See AbstractAttribute::trackStatistics()
2935   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2936 };
2937 } // namespace
2938 
2939 /// -------------------AAReachability Attribute--------------------------
2940 
2941 namespace {
2942 struct AAReachabilityImpl : AAReachability {
2943   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2944       : AAReachability(IRP, A) {}
2945 
2946   const std::string getAsStr() const override {
2947     // TODO: Return the number of reachable queries.
2948     return "reachable";
2949   }
2950 
2951   /// See AbstractAttribute::updateImpl(...).
2952   ChangeStatus updateImpl(Attributor &A) override {
2953     return ChangeStatus::UNCHANGED;
2954   }
2955 };
2956 
2957 struct AAReachabilityFunction final : public AAReachabilityImpl {
2958   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2959       : AAReachabilityImpl(IRP, A) {}
2960 
2961   /// See AbstractAttribute::trackStatistics()
2962   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2963 };
2964 } // namespace
2965 
2966 /// ------------------------ NoAlias Argument Attribute ------------------------
2967 
2968 namespace {
2969 struct AANoAliasImpl : AANoAlias {
2970   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2971     assert(getAssociatedType()->isPointerTy() &&
2972            "Noalias is a pointer attribute");
2973   }
2974 
2975   const std::string getAsStr() const override {
2976     return getAssumed() ? "noalias" : "may-alias";
2977   }
2978 };
2979 
2980 /// NoAlias attribute for a floating value.
2981 struct AANoAliasFloating final : AANoAliasImpl {
2982   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2983       : AANoAliasImpl(IRP, A) {}
2984 
2985   /// See AbstractAttribute::initialize(...).
2986   void initialize(Attributor &A) override {
2987     AANoAliasImpl::initialize(A);
2988     Value *Val = &getAssociatedValue();
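    // Look through a chain of casts to the underlying value, as long as each
    // cast operand has no use besides the cast itself.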
2989     do {
2990       CastInst *CI = dyn_cast<CastInst>(Val);
2991       if (!CI)
2992         break;
2993       Value *Base = CI->getOperand(0);
2994       if (!Base->hasOneUse())
2995         break;
2996       Val = Base;
2997     } while (true);
2998 
2999     if (!Val->getType()->isPointerTy()) {
3000       indicatePessimisticFixpoint();
3001       return;
3002     }
3003 
3004     if (isa<AllocaInst>(Val))
3005       indicateOptimisticFixpoint();
3006     else if (isa<ConstantPointerNull>(Val) &&
3007              !NullPointerIsDefined(getAnchorScope(),
3008                                    Val->getType()->getPointerAddressSpace()))
3009       indicateOptimisticFixpoint();
3010     else if (Val != &getAssociatedValue()) {
3011       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
3012           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
3013       if (ValNoAliasAA.isKnownNoAlias())
3014         indicateOptimisticFixpoint();
3015     }
3016   }
3017 
3018   /// See AbstractAttribute::updateImpl(...).
3019   ChangeStatus updateImpl(Attributor &A) override {
3020     // TODO: Implement this.
3021     return indicatePessimisticFixpoint();
3022   }
3023 
3024   /// See AbstractAttribute::trackStatistics()
3025   void trackStatistics() const override {
3026     STATS_DECLTRACK_FLOATING_ATTR(noalias)
3027   }
3028 };
3029 
3030 /// NoAlias attribute for an argument.
3031 struct AANoAliasArgument final
3032     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
3033   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
3034   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3035 
3036   /// See AbstractAttribute::initialize(...).
3037   void initialize(Attributor &A) override {
3038     Base::initialize(A);
3039     // See callsite argument attribute and callee argument attribute.
3040     if (hasAttr({Attribute::ByVal}))
3041       indicateOptimisticFixpoint();
3042   }
3043 
  /// See AbstractAttribute::updateImpl(...).
3045   ChangeStatus updateImpl(Attributor &A) override {
3046     // We have to make sure no-alias on the argument does not break
3047     // synchronization when this is a callback argument, see also [1] below.
3048     // If synchronization cannot be affected, we delegate to the base updateImpl
3049     // function, otherwise we give up for now.
3050 
3051     // If the function is no-sync, no-alias cannot break synchronization.
3052     const auto &NoSyncAA =
3053         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3054                              DepClassTy::OPTIONAL);
3055     if (NoSyncAA.isAssumedNoSync())
3056       return Base::updateImpl(A);
3057 
3058     // If the argument is read-only, no-alias cannot break synchronization.
3059     bool IsKnown;
3060     if (AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
3061       return Base::updateImpl(A);
3062 
3063     // If the argument is never passed through callbacks, no-alias cannot break
3064     // synchronization.
3065     bool UsedAssumedInformation = false;
3066     if (A.checkForAllCallSites(
3067             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3068             true, UsedAssumedInformation))
3069       return Base::updateImpl(A);
3070 
3071     // TODO: add no-alias but make sure it doesn't break synchronization by
3072     // introducing fake uses. See:
3073     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3074     //     International Workshop on OpenMP 2018,
3075     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3076 
3077     return indicatePessimisticFixpoint();
3078   }
3079 
3080   /// See AbstractAttribute::trackStatistics()
3081   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3082 };
3083 
3084 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3085   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3086       : AANoAliasImpl(IRP, A) {}
3087 
3088   /// See AbstractAttribute::initialize(...).
3089   void initialize(Attributor &A) override {
3090     // See callsite argument attribute and callee argument attribute.
3091     const auto &CB = cast<CallBase>(getAnchorValue());
3092     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3093       indicateOptimisticFixpoint();
3094     Value &Val = getAssociatedValue();
3095     if (isa<ConstantPointerNull>(Val) &&
3096         !NullPointerIsDefined(getAnchorScope(),
3097                               Val.getType()->getPointerAddressSpace()))
3098       indicateOptimisticFixpoint();
3099   }
3100 
  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
3103   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3104                             const AAMemoryBehavior &MemBehaviorAA,
3105                             const CallBase &CB, unsigned OtherArgNo) {
3106     // We do not need to worry about aliasing with the underlying IRP.
3107     if (this->getCalleeArgNo() == (int)OtherArgNo)
3108       return false;
3109 
3110     // If it is not a pointer or pointer vector we do not alias.
3111     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3112     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3113       return false;
3114 
3115     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3116         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3117 
3118     // If the argument is readnone, there is no read-write aliasing.
3119     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3120       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3121       return false;
3122     }
3123 
3124     // If the argument is readonly and the underlying value is readonly, there
3125     // is no read-write aliasing.
3126     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3127     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3128       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3129       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3130       return false;
3131     }
3132 
3133     // We have to utilize actual alias analysis queries so we need the object.
3134     if (!AAR)
3135       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3136 
3137     // Try to rule it out at the call site.
3138     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3139     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3140                          "callsite arguments: "
3141                       << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias\n");
3143 
3144     return IsAliasing;
3145   }
3146 
3147   bool
3148   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3149                                          const AAMemoryBehavior &MemBehaviorAA,
3150                                          const AANoAlias &NoAliasAA) {
3151     // We can deduce "noalias" if the following conditions hold.
3152     // (i)   Associated value is assumed to be noalias in the definition.
3153     // (ii)  Associated value is assumed to be no-capture in all the uses
3154     //       possibly executed before this callsite.
3155     // (iii) There is no other pointer argument which could alias with the
3156     //       value.
3157 
3158     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3159     if (!AssociatedValueIsNoAliasAtDef) {
3160       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3161                         << " is not no-alias at the definition\n");
3162       return false;
3163     }
3164 
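    // Callback for the capture tracking below: a value with any assumed
    // dereferenceable bytes is treated as "dereferenceable or null".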
3165     auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
3166       const auto &DerefAA = A.getAAFor<AADereferenceable>(
3167           *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
3168       return DerefAA.getAssumedDereferenceableBytes();
3169     };
3170 
3171     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3172 
3173     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3174     const Function *ScopeFn = VIRP.getAnchorScope();
3175     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // call site.
3179     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3180       Instruction *UserI = cast<Instruction>(U.getUser());
3181 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI we allow the use.
3184       // TODO: We should inspect the operands and allow those that cannot alias
3185       //       with the value.
3186       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3187         return true;
3188 
3189       if (ScopeFn) {
3190         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3191           if (CB->isArgOperand(&U)) {
3192 
3193             unsigned ArgNo = CB->getArgOperandNo(&U);
3194 
3195             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3196                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3197                 DepClassTy::OPTIONAL);
3198 
3199             if (NoCaptureAA.isAssumedNoCapture())
3200               return true;
3201           }
3202         }
3203 
3204         if (!AA::isPotentiallyReachable(A, *UserI, *getCtxI(), *this))
3205           return true;
3206       }
3207 
3208       // TODO: We should track the capturing uses in AANoCapture but the problem
3209       //       is CGSCC runs. For those we would need to "allow" AANoCapture for
3210       //       a value in the module slice.
3211       switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
3212       case UseCaptureKind::NO_CAPTURE:
3213         return true;
3214       case UseCaptureKind::MAY_CAPTURE:
3215         LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *UserI
3216                           << "\n");
3217         return false;
3218       case UseCaptureKind::PASSTHROUGH:
3219         Follow = true;
3220         return true;
3221       }
3222       llvm_unreachable("unknown UseCaptureKind");
3223     };
3224 
3225     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3226       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3227         LLVM_DEBUG(
3228             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3229                    << " cannot be noalias as it is potentially captured\n");
3230         return false;
3231       }
3232     }
3233     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3234 
3235     // Check there is no other pointer argument which could alias with the
3236     // value passed at this call site.
3237     // TODO: AbstractCallSite
3238     const auto &CB = cast<CallBase>(getAnchorValue());
3239     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3240       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3241         return false;
3242 
3243     return true;
3244   }
3245 
3246   /// See AbstractAttribute::updateImpl(...).
3247   ChangeStatus updateImpl(Attributor &A) override {
3248     // If the argument is readnone we are done as there are no accesses via the
3249     // argument.
3250     auto &MemBehaviorAA =
3251         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3252     if (MemBehaviorAA.isAssumedReadNone()) {
3253       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3254       return ChangeStatus::UNCHANGED;
3255     }
3256 
3257     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3258     const auto &NoAliasAA =
3259         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3260 
3261     AAResults *AAR = nullptr;
3262     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3263                                                NoAliasAA)) {
3264       LLVM_DEBUG(
3265           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3266       return ChangeStatus::UNCHANGED;
3267     }
3268 
3269     return indicatePessimisticFixpoint();
3270   }
3271 
3272   /// See AbstractAttribute::trackStatistics()
3273   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3274 };
3275 
3276 /// NoAlias attribute for function return value.
3277 struct AANoAliasReturned final : AANoAliasImpl {
3278   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3279       : AANoAliasImpl(IRP, A) {}
3280 
3281   /// See AbstractAttribute::initialize(...).
3282   void initialize(Attributor &A) override {
3283     AANoAliasImpl::initialize(A);
3284     Function *F = getAssociatedFunction();
3285     if (!F || F->isDeclaration())
3286       indicatePessimisticFixpoint();
3287   }
3288 
3289   /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
3291 
3292     auto CheckReturnValue = [&](Value &RV) -> bool {
3293       if (Constant *C = dyn_cast<Constant>(&RV))
3294         if (C->isNullValue() || isa<UndefValue>(C))
3295           return true;
3296 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
3299       if (!isa<CallBase>(&RV))
3300         return false;
3301 
3302       const IRPosition &RVPos = IRPosition::value(RV);
3303       const auto &NoAliasAA =
3304           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3305       if (!NoAliasAA.isAssumedNoAlias())
3306         return false;
3307 
3308       const auto &NoCaptureAA =
3309           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3310       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3311     };
3312 
3313     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3314       return indicatePessimisticFixpoint();
3315 
3316     return ChangeStatus::UNCHANGED;
3317   }
3318 
3319   /// See AbstractAttribute::trackStatistics()
3320   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3321 };
3322 
3323 /// NoAlias attribute deduction for a call site return value.
3324 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3325   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3326       : AANoAliasImpl(IRP, A) {}
3327 
3328   /// See AbstractAttribute::initialize(...).
3329   void initialize(Attributor &A) override {
3330     AANoAliasImpl::initialize(A);
3331     Function *F = getAssociatedFunction();
3332     if (!F || F->isDeclaration())
3333       indicatePessimisticFixpoint();
3334   }
3335 
3336   /// See AbstractAttribute::updateImpl(...).
3337   ChangeStatus updateImpl(Attributor &A) override {
3338     // TODO: Once we have call site specific value information we can provide
3339     //       call site specific liveness information and then it makes
3340     //       sense to specialize attributes for call sites arguments instead of
3341     //       redirecting requests to the callee argument.
3342     Function *F = getAssociatedFunction();
3343     const IRPosition &FnPos = IRPosition::returned(*F);
3344     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3345     return clampStateAndIndicateChange(getState(), FnAA.getState());
3346   }
3347 
3348   /// See AbstractAttribute::trackStatistics()
3349   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3350 };
3351 } // namespace
3352 
3353 /// -------------------AAIsDead Function Attribute-----------------------
3354 
3355 namespace {
3356 struct AAIsDeadValueImpl : public AAIsDead {
3357   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3358 
3359   /// See AbstractAttribute::initialize(...).
3360   void initialize(Attributor &A) override {
3361     if (auto *Scope = getAnchorScope())
3362       if (!A.isRunOn(*Scope))
3363         indicatePessimisticFixpoint();
3364   }
3365 
3366   /// See AAIsDead::isAssumedDead().
3367   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3368 
3369   /// See AAIsDead::isKnownDead().
3370   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3371 
3372   /// See AAIsDead::isAssumedDead(BasicBlock *).
3373   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3374 
3375   /// See AAIsDead::isKnownDead(BasicBlock *).
3376   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3377 
3378   /// See AAIsDead::isAssumedDead(Instruction *I).
3379   bool isAssumedDead(const Instruction *I) const override {
3380     return I == getCtxI() && isAssumedDead();
3381   }
3382 
3383   /// See AAIsDead::isKnownDead(Instruction *I).
3384   bool isKnownDead(const Instruction *I) const override {
3385     return isAssumedDead(I) && isKnownDead();
3386   }
3387 
3388   /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
3390     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3391   }
3392 
3393   /// Check if all uses are assumed dead.
3394   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    // Callers might not check the type; void has no uses.
3396     if (V.getType()->isVoidTy() || V.use_empty())
3397       return true;
3398 
3399     // If we replace a value with a constant there are no uses left afterwards.
3400     if (!isa<Constant>(V)) {
3401       if (auto *I = dyn_cast<Instruction>(&V))
3402         if (!A.isRunOn(*I->getFunction()))
3403           return false;
3404       bool UsedAssumedInformation = false;
3405       Optional<Constant *> C =
3406           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3407       if (!C || *C)
3408         return true;
3409     }
3410 
3411     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3412     // Explicitly set the dependence class to required because we want a long
3413     // chain of N dependent instructions to be considered live as soon as one is
3414     // without going through N update cycles. This is not required for
3415     // correctness.
3416     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3417                              DepClassTy::REQUIRED,
3418                              /* IgnoreDroppableUses */ false);
3419   }
3420 
3421   /// Determine if \p I is assumed to be side-effect free.
3422   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3423     if (!I || wouldInstructionBeTriviallyDead(I))
3424       return true;
3425 
3426     auto *CB = dyn_cast<CallBase>(I);
3427     if (!CB || isa<IntrinsicInst>(CB))
3428       return false;
3429 
3430     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3431     const auto &NoUnwindAA =
3432         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3433     if (!NoUnwindAA.isAssumedNoUnwind())
3434       return false;
3435     if (!NoUnwindAA.isKnownNoUnwind())
3436       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3437 
3438     bool IsKnown;
3439     return AA::isAssumedReadOnly(A, CallIRP, *this, IsKnown);
3440   }
3441 };
3442 
3443 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3444   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3445       : AAIsDeadValueImpl(IRP, A) {}
3446 
3447   /// See AbstractAttribute::initialize(...).
3448   void initialize(Attributor &A) override {
3449     AAIsDeadValueImpl::initialize(A);
3450 
3451     if (isa<UndefValue>(getAssociatedValue())) {
3452       indicatePessimisticFixpoint();
3453       return;
3454     }
3455 
3456     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3457     if (!isAssumedSideEffectFree(A, I)) {
3458       if (!isa_and_nonnull<StoreInst>(I))
3459         indicatePessimisticFixpoint();
3460       else
3461         removeAssumedBits(HAS_NO_EFFECT);
3462     }
3463   }
3464 
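  /// Determine if the store \p SI is dead: all potential copies of the
  /// stored value, i.e., the places it could be reloaded from, have to be
  /// assumed dead themselves.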
3465   bool isDeadStore(Attributor &A, StoreInst &SI) {
    // The language reference states that a volatile store is neither UB nor
    // dead; skip them.
3467     if (SI.isVolatile())
3468       return false;
3469 
3470     bool UsedAssumedInformation = false;
3471     SmallSetVector<Value *, 4> PotentialCopies;
3472     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3473                                              UsedAssumedInformation))
3474       return false;
3475     return llvm::all_of(PotentialCopies, [&](Value *V) {
3476       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3477                              UsedAssumedInformation);
3478     });
3479   }
3480 
3481   /// See AbstractAttribute::getAsStr().
3482   const std::string getAsStr() const override {
3483     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3484     if (isa_and_nonnull<StoreInst>(I))
3485       if (isValidState())
3486         return "assumed-dead-store";
3487     return AAIsDeadValueImpl::getAsStr();
3488   }
3489 
3490   /// See AbstractAttribute::updateImpl(...).
3491   ChangeStatus updateImpl(Attributor &A) override {
3492     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3493     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3494       if (!isDeadStore(A, *SI))
3495         return indicatePessimisticFixpoint();
3496     } else {
3497       if (!isAssumedSideEffectFree(A, I))
3498         return indicatePessimisticFixpoint();
3499       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3500         return indicatePessimisticFixpoint();
3501     }
3502     return ChangeStatus::UNCHANGED;
3503   }
3504 
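  /// See AAIsDead::isRemovableStore().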
3505   bool isRemovableStore() const override {
3506     return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
3507   }
3508 
3509   /// See AbstractAttribute::manifest(...).
3510   ChangeStatus manifest(Attributor &A) override {
3511     Value &V = getAssociatedValue();
3512     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // again whether isAssumedSideEffectFree holds because it might not be
      // the case: then only the users are dead but the instruction (= the
      // call) is still needed.
3517       if (isa<StoreInst>(I) ||
3518           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3519         A.deleteAfterManifest(*I);
3520         return ChangeStatus::CHANGED;
3521       }
3522     }
3523     return ChangeStatus::UNCHANGED;
3524   }
3525 
3526   /// See AbstractAttribute::trackStatistics()
3527   void trackStatistics() const override {
3528     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3529   }
3530 };
3531 
3532 struct AAIsDeadArgument : public AAIsDeadFloating {
3533   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3534       : AAIsDeadFloating(IRP, A) {}
3535 
3536   /// See AbstractAttribute::initialize(...).
3537   void initialize(Attributor &A) override {
3538     AAIsDeadFloating::initialize(A);
3539     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3540       indicatePessimisticFixpoint();
3541   }
3542 
3543   /// See AbstractAttribute::manifest(...).
3544   ChangeStatus manifest(Attributor &A) override {
3545     Argument &Arg = *getAssociatedArgument();
3546     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3547       if (A.registerFunctionSignatureRewrite(
3548               Arg, /* ReplacementTypes */ {},
3549               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3550               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3551         return ChangeStatus::CHANGED;
3552       }
3553     return ChangeStatus::UNCHANGED;
3554   }
3555 
3556   /// See AbstractAttribute::trackStatistics()
3557   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3558 };
3559 
3560 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3561   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3562       : AAIsDeadValueImpl(IRP, A) {}
3563 
3564   /// See AbstractAttribute::initialize(...).
3565   void initialize(Attributor &A) override {
3566     AAIsDeadValueImpl::initialize(A);
3567     if (isa<UndefValue>(getAssociatedValue()))
3568       indicatePessimisticFixpoint();
3569   }
3570 
3571   /// See AbstractAttribute::updateImpl(...).
3572   ChangeStatus updateImpl(Attributor &A) override {
3573     // TODO: Once we have call site specific value information we can provide
3574     //       call site specific liveness information and then it makes
3575     //       sense to specialize attributes for call sites arguments instead of
3576     //       redirecting requests to the callee argument.
3577     Argument *Arg = getAssociatedArgument();
3578     if (!Arg)
3579       return indicatePessimisticFixpoint();
3580     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3581     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3582     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3583   }
3584 
3585   /// See AbstractAttribute::manifest(...).
3586   ChangeStatus manifest(Attributor &A) override {
3587     CallBase &CB = cast<CallBase>(getAnchorValue());
3588     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3589     assert(!isa<UndefValue>(U.get()) &&
3590            "Expected undef values to be filtered out!");
3591     UndefValue &UV = *UndefValue::get(U->getType());
3592     if (A.changeUseAfterManifest(U, UV))
3593       return ChangeStatus::CHANGED;
3594     return ChangeStatus::UNCHANGED;
3595   }
3596 
3597   /// See AbstractAttribute::trackStatistics()
3598   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3599 };
3600 
3601 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3602   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3603       : AAIsDeadFloating(IRP, A) {}
3604 
3605   /// See AAIsDead::isAssumedDead().
3606   bool isAssumedDead() const override {
3607     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3608   }
3609 
3610   /// See AbstractAttribute::initialize(...).
3611   void initialize(Attributor &A) override {
3612     AAIsDeadFloating::initialize(A);
3613     if (isa<UndefValue>(getAssociatedValue())) {
3614       indicatePessimisticFixpoint();
3615       return;
3616     }
3617 
3618     // We track this separately as a secondary state.
3619     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3620   }
3621 
3622   /// See AbstractAttribute::updateImpl(...).
3623   ChangeStatus updateImpl(Attributor &A) override {
3624     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3625     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3626       IsAssumedSideEffectFree = false;
3627       Changed = ChangeStatus::CHANGED;
3628     }
3629     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3630       return indicatePessimisticFixpoint();
3631     return Changed;
3632   }
3633 
3634   /// See AbstractAttribute::trackStatistics()
3635   void trackStatistics() const override {
3636     if (IsAssumedSideEffectFree)
3637       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3638     else
3639       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3640   }
3641 
3642   /// See AbstractAttribute::getAsStr().
3643   const std::string getAsStr() const override {
3644     return isAssumedDead()
3645                ? "assumed-dead"
3646                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3647   }
3648 
3649 private:
3650   bool IsAssumedSideEffectFree = true;
3651 };
3652 
3653 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3654   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3655       : AAIsDeadValueImpl(IRP, A) {}
3656 
3657   /// See AbstractAttribute::updateImpl(...).
3658   ChangeStatus updateImpl(Attributor &A) override {
3659 
3660     bool UsedAssumedInformation = false;
3661     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3662                               {Instruction::Ret}, UsedAssumedInformation);
3663 
3664     auto PredForCallSite = [&](AbstractCallSite ACS) {
3665       if (ACS.isCallbackCall() || !ACS.getInstruction())
3666         return false;
3667       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3668     };
3669 
3670     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3671                                 UsedAssumedInformation))
3672       return indicatePessimisticFixpoint();
3673 
3674     return ChangeStatus::UNCHANGED;
3675   }
3676 
3677   /// See AbstractAttribute::manifest(...).
3678   ChangeStatus manifest(Attributor &A) override {
3679     // TODO: Rewrite the signature to return void?
3680     bool AnyChange = false;
3681     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3682     auto RetInstPred = [&](Instruction &I) {
3683       ReturnInst &RI = cast<ReturnInst>(I);
3684       if (!isa<UndefValue>(RI.getReturnValue()))
3685         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3686       return true;
3687     };
3688     bool UsedAssumedInformation = false;
3689     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3690                               UsedAssumedInformation);
3691     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3692   }
3693 
3694   /// See AbstractAttribute::trackStatistics()
3695   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3696 };
3697 
3698 struct AAIsDeadFunction : public AAIsDead {
3699   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3700 
3701   /// See AbstractAttribute::initialize(...).
3702   void initialize(Attributor &A) override {
3703     Function *F = getAnchorScope();
3704     if (!F || F->isDeclaration() || !A.isRunOn(*F)) {
3705       indicatePessimisticFixpoint();
3706       return;
3707     }
3708     ToBeExploredFrom.insert(&F->getEntryBlock().front());
3709     assumeLive(A, F->getEntryBlock());
3710   }
3711 
3712   /// See AbstractAttribute::getAsStr().
3713   const std::string getAsStr() const override {
3714     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3715            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3716            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3717            std::to_string(KnownDeadEnds.size()) + "]";
3718   }
3719 
3720   /// See AbstractAttribute::manifest(...).
3721   ChangeStatus manifest(Attributor &A) override {
3722     assert(getState().isValidState() &&
3723            "Attempted to manifest an invalid state!");
3724 
3725     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3726     Function &F = *getAnchorScope();
3727 
3728     if (AssumedLiveBlocks.empty()) {
3729       A.deleteAfterManifest(F);
3730       return ChangeStatus::CHANGED;
3731     }
3732 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
3736     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3737 
3738     KnownDeadEnds.set_union(ToBeExploredFrom);
3739     for (const Instruction *DeadEndI : KnownDeadEnds) {
3740       auto *CB = dyn_cast<CallBase>(DeadEndI);
3741       if (!CB)
3742         continue;
3743       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3744           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3745       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3746       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3747         continue;
3748 
3749       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3750         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3751       else
3752         A.changeToUnreachableAfterManifest(
3753             const_cast<Instruction *>(DeadEndI->getNextNode()));
3754       HasChanged = ChangeStatus::CHANGED;
3755     }
3756 
3757     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3758     for (BasicBlock &BB : F)
3759       if (!AssumedLiveBlocks.count(&BB)) {
3760         A.deleteAfterManifest(BB);
3761         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3762         HasChanged = ChangeStatus::CHANGED;
3763       }
3764 
3765     return HasChanged;
3766   }
3767 
3768   /// See AbstractAttribute::updateImpl(...).
3769   ChangeStatus updateImpl(Attributor &A) override;
3770 
3771   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3772     assert(From->getParent() == getAnchorScope() &&
3773            To->getParent() == getAnchorScope() &&
3774            "Used AAIsDead of the wrong function");
3775     return isValidState() && !AssumedLiveEdges.count(std::make_pair(From, To));
3776   }
3777 
3778   /// See AbstractAttribute::trackStatistics()
3779   void trackStatistics() const override {}
3780 
3781   /// Returns true if the function is assumed dead.
3782   bool isAssumedDead() const override { return false; }
3783 
3784   /// See AAIsDead::isKnownDead().
3785   bool isKnownDead() const override { return false; }
3786 
3787   /// See AAIsDead::isAssumedDead(BasicBlock *).
3788   bool isAssumedDead(const BasicBlock *BB) const override {
3789     assert(BB->getParent() == getAnchorScope() &&
3790            "BB must be in the same anchor scope function.");
3791 
3792     if (!getAssumed())
3793       return false;
3794     return !AssumedLiveBlocks.count(BB);
3795   }
3796 
3797   /// See AAIsDead::isKnownDead(BasicBlock *).
3798   bool isKnownDead(const BasicBlock *BB) const override {
3799     return getKnown() && isAssumedDead(BB);
3800   }
3801 
  /// See AAIsDead::isAssumedDead(Instruction *I).
3803   bool isAssumedDead(const Instruction *I) const override {
3804     assert(I->getParent()->getParent() == getAnchorScope() &&
3805            "Instruction must be in the same anchor scope function.");
3806 
3807     if (!getAssumed())
3808       return false;
3809 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
3812     if (!AssumedLiveBlocks.count(I->getParent()))
3813       return true;
3814 
3815     // If it is not after a liveness barrier it is live.
3816     const Instruction *PrevI = I->getPrevNode();
3817     while (PrevI) {
3818       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3819         return true;
3820       PrevI = PrevI->getPrevNode();
3821     }
3822     return false;
3823   }
3824 
3825   /// See AAIsDead::isKnownDead(Instruction *I).
3826   bool isKnownDead(const Instruction *I) const override {
3827     return getKnown() && isAssumedDead(I);
3828   }
3829 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3832   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3833     if (!AssumedLiveBlocks.insert(&BB).second)
3834       return false;
3835 
3836     // We assume that all of BB is (probably) live now and if there are calls to
3837     // internal functions we will assume that those are now live as well. This
3838     // is a performance optimization for blocks with calls to a lot of internal
3839     // functions. It can however cause dead functions to be treated as live.
3840     for (const Instruction &I : BB)
3841       if (const auto *CB = dyn_cast<CallBase>(&I))
3842         if (const Function *F = CB->getCalledFunction())
3843           if (F->hasLocalLinkage())
3844             A.markLiveInternalFunction(*F);
3845     return true;
3846   }
3847 
3848   /// Collection of instructions that need to be explored again, e.g., we
3849   /// did assume they do not transfer control to (one of their) successors.
3850   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3851 
3852   /// Collection of instructions that are known to not transfer control.
3853   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3854 
3855   /// Collection of all assumed live edges
3856   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3857 
3858   /// Collection of all assumed live BasicBlocks.
3859   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3860 };
3861 
3862 static bool
3863 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3864                         AbstractAttribute &AA,
3865                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3866   const IRPosition &IPos = IRPosition::callsite_function(CB);
3867 
3868   const auto &NoReturnAA =
3869       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
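  // If the call site is assumed not to return, no successor is alive. This
  // counts as used assumed information unless noreturn is already known.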
3870   if (NoReturnAA.isAssumedNoReturn())
3871     return !NoReturnAA.isKnownNoReturn();
3872   if (CB.isTerminator())
3873     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3874   else
3875     AliveSuccessors.push_back(CB.getNextNode());
3876   return false;
3877 }
3878 
3879 static bool
3880 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3881                         AbstractAttribute &AA,
3882                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3883   bool UsedAssumedInformation =
3884       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3885 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3889   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3890     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3891   } else {
3892     const IRPosition &IPos = IRPosition::callsite_function(II);
3893     const auto &AANoUnw =
3894         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3895     if (AANoUnw.isAssumedNoUnwind()) {
3896       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3897     } else {
3898       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3899     }
3900   }
3901   return UsedAssumedInformation;
3902 }
3903 
3904 static bool
3905 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3906                         AbstractAttribute &AA,
3907                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3908   bool UsedAssumedInformation = false;
3909   if (BI.getNumSuccessors() == 1) {
3910     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3911   } else {
3912     Optional<Constant *> C =
3913         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3914     if (!C || isa_and_nonnull<UndefValue>(*C)) {
3915       // No value yet, assume both edges are dead.
3916     } else if (isa_and_nonnull<ConstantInt>(*C)) {
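      // Successor 0 is taken on a true (1) condition and successor 1 on a
      // false (0) condition, hence the taken successor index is
      // 1 - condition.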
3917       const BasicBlock *SuccBB =
3918           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3919       AliveSuccessors.push_back(&SuccBB->front());
3920     } else {
3921       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3922       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3923       UsedAssumedInformation = false;
3924     }
3925   }
3926   return UsedAssumedInformation;
3927 }
3928 
3929 static bool
3930 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3931                         AbstractAttribute &AA,
3932                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3933   bool UsedAssumedInformation = false;
3934   Optional<Constant *> C =
3935       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
  if (!C || isa_and_nonnull<UndefValue>(*C)) {
    // No value yet, assume all edges are dead.
  } else if (isa_and_nonnull<ConstantInt>(*C)) {
    for (auto &CaseIt : SI.cases()) {
      if (CaseIt.getCaseValue() == *C) {
3941         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3942         return UsedAssumedInformation;
3943       }
3944     }
3945     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3946     return UsedAssumedInformation;
3947   } else {
3948     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3949       AliveSuccessors.push_back(&SuccBB->front());
3950   }
3951   return UsedAssumedInformation;
3952 }
3953 
3954 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3955   ChangeStatus Change = ChangeStatus::UNCHANGED;
3956 
3957   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3958                     << getAnchorScope()->size() << "] BBs and "
3959                     << ToBeExploredFrom.size() << " exploration points and "
3960                     << KnownDeadEnds.size() << " known dead ends\n");
3961 
3962   // Copy and clear the list of instructions we need to explore from. It is
3963   // refilled with instructions the next update has to look at.
3964   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3965                                                ToBeExploredFrom.end());
3966   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3967 
3968   SmallVector<const Instruction *, 8> AliveSuccessors;
3969   while (!Worklist.empty()) {
3970     const Instruction *I = Worklist.pop_back_val();
3971     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3972 
3973     // Fast forward for uninteresting instructions. We could look for UB here
3974     // though.
3975     while (!I->isTerminator() && !isa<CallBase>(I))
3976       I = I->getNextNode();
3977 
3978     AliveSuccessors.clear();
3979 
3980     bool UsedAssumedInformation = false;
3981     switch (I->getOpcode()) {
3982     // TODO: look for (assumed) UB to backwards propagate "deadness".
3983     default:
3984       assert(I->isTerminator() &&
3985              "Expected non-terminators to be handled already!");
3986       for (const BasicBlock *SuccBB : successors(I->getParent()))
3987         AliveSuccessors.push_back(&SuccBB->front());
3988       break;
3989     case Instruction::Call:
3990       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3991                                                        *this, AliveSuccessors);
3992       break;
3993     case Instruction::Invoke:
3994       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3995                                                        *this, AliveSuccessors);
3996       break;
3997     case Instruction::Br:
3998       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3999                                                        *this, AliveSuccessors);
4000       break;
4001     case Instruction::Switch:
4002       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
4003                                                        *this, AliveSuccessors);
4004       break;
4005     }
4006 
4007     if (UsedAssumedInformation) {
4008       NewToBeExploredFrom.insert(I);
4009     } else if (AliveSuccessors.empty() ||
4010                (I->isTerminator() &&
4011                 AliveSuccessors.size() < I->getNumSuccessors())) {
4012       if (KnownDeadEnds.insert(I))
4013         Change = ChangeStatus::CHANGED;
4014     }
4015 
4016     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
4017                       << AliveSuccessors.size() << " UsedAssumedInformation: "
4018                       << UsedAssumedInformation << "\n");
4019 
4020     for (const Instruction *AliveSuccessor : AliveSuccessors) {
4021       if (!I->isTerminator()) {
4022         assert(AliveSuccessors.size() == 1 &&
4023                "Non-terminator expected to have a single successor!");
4024         Worklist.push_back(AliveSuccessor);
4025       } else {
        // Record the assumed live edge.
4027         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
4028         if (AssumedLiveEdges.insert(Edge).second)
4029           Change = ChangeStatus::CHANGED;
4030         if (assumeLive(A, *AliveSuccessor->getParent()))
4031           Worklist.push_back(AliveSuccessor);
4032       }
4033     }
4034   }
4035 
4036   // Check if the content of ToBeExploredFrom changed, ignore the order.
4037   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
4038       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
4039         return !ToBeExploredFrom.count(I);
4040       })) {
4041     Change = ChangeStatus::CHANGED;
4042     ToBeExploredFrom = std::move(NewToBeExploredFrom);
4043   }
4044 
4045   // If we know everything is live there is no need to query for liveness.
4046   // Instead, indicating a pessimistic fixpoint will cause the state to be
4047   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we must (1) have finished the exploration, (2) not
  // have ruled any unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
4051   if (ToBeExploredFrom.empty() &&
4052       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
4053       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
4054         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
4055       }))
4056     return indicatePessimisticFixpoint();
4057   return Change;
4058 }
4059 
/// Liveness information for a call site.
4061 struct AAIsDeadCallSite final : AAIsDeadFunction {
4062   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4063       : AAIsDeadFunction(IRP, A) {}
4064 
4065   /// See AbstractAttribute::initialize(...).
4066   void initialize(Attributor &A) override {
4067     // TODO: Once we have call site specific value information we can provide
4068     //       call site specific liveness information and then it makes
4069     //       sense to specialize attributes for call sites instead of
4070     //       redirecting requests to the callee.
4071     llvm_unreachable("Abstract attributes for liveness are not "
4072                      "supported for call sites yet!");
4073   }
4074 
4075   /// See AbstractAttribute::updateImpl(...).
4076   ChangeStatus updateImpl(Attributor &A) override {
4077     return indicatePessimisticFixpoint();
4078   }
4079 
4080   /// See AbstractAttribute::trackStatistics()
4081   void trackStatistics() const override {}
4082 };
4083 } // namespace
4084 
4085 /// -------------------- Dereferenceable Argument Attribute --------------------
4086 
4087 namespace {
4088 struct AADereferenceableImpl : AADereferenceable {
4089   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4090       : AADereferenceable(IRP, A) {}
4091   using StateType = DerefState;
4092 
4093   /// See AbstractAttribute::initialize(...).
4094   void initialize(Attributor &A) override {
4095     Value &V = *getAssociatedValue().stripPointerCasts();
4096     SmallVector<Attribute, 4> Attrs;
4097     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4098              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4099     for (const Attribute &Attr : Attrs)
4100       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4101 
4102     const IRPosition &IRP = this->getIRPosition();
4103     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4104 
4105     bool CanBeNull, CanBeFreed;
4106     takeKnownDerefBytesMaximum(V.getPointerDereferenceableBytes(
4107         A.getDataLayout(), CanBeNull, CanBeFreed));
4108 
4109     bool IsFnInterface = IRP.isFnInterfaceKind();
4110     Function *FnScope = IRP.getAnchorScope();
4111     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4112       indicatePessimisticFixpoint();
4113       return;
4114     }
4115 
4116     if (Instruction *CtxI = getCtxI())
4117       followUsesInMBEC(*this, A, getState(), *CtxI);
4118   }
4119 
4120   /// See AbstractAttribute::getState()
4121   /// {
4122   StateType &getState() override { return *this; }
4123   const StateType &getState() const override { return *this; }
4124   /// }
4125 
4126   /// Helper function for collecting accessed bytes in must-be-executed-context
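  /// For example (illustrative): a 4-byte load from the associated pointer at
  /// constant offset 8 records the accessed byte range [8, 12).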
4127   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4128                               DerefState &State) {
4129     const Value *UseV = U->get();
4130     if (!UseV->getType()->isPointerTy())
4131       return;
4132 
4133     Optional<MemoryLocation> Loc = MemoryLocation::getOrNone(I);
4134     if (!Loc || Loc->Ptr != UseV || !Loc->Size.isPrecise() || I->isVolatile())
4135       return;
4136 
4137     int64_t Offset;
4138     const Value *Base = GetPointerBaseWithConstantOffset(
4139         Loc->Ptr, Offset, A.getDataLayout(), /*AllowNonInbounds*/ true);
4140     if (Base && Base == &getAssociatedValue())
4141       State.addAccessedBytes(Offset, Loc->Size.getValue());
4142   }
4143 
4144   /// See followUsesInMBEC
4145   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4146                        AADereferenceable::StateType &State) {
4147     bool IsNonNull = false;
4148     bool TrackUse = false;
4149     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4150         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4151     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4152                       << " for instruction " << *I << "\n");
4153 
4154     addAccessedBytesForUse(A, U, I, State);
4155     State.takeKnownDerefBytesMaximum(DerefBytes);
4156     return TrackUse;
4157   }
4158 
4159   /// See AbstractAttribute::manifest(...).
4160   ChangeStatus manifest(Attributor &A) override {
4161     ChangeStatus Change = AADereferenceable::manifest(A);
4162     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4163       removeAttrs({Attribute::DereferenceableOrNull});
4164       return ChangeStatus::CHANGED;
4165     }
4166     return Change;
4167   }
4168 
4169   void getDeducedAttributes(LLVMContext &Ctx,
4170                             SmallVectorImpl<Attribute> &Attrs) const override {
4171     // TODO: Add *_globally support
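    // For example (illustrative): a pointer assumed non-null with 8 assumed
    // dereferenceable bytes manifests as `dereferenceable(8)`; without the
    // non-null assumption it manifests as `dereferenceable_or_null(8)`.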
4172     if (isAssumedNonNull())
4173       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4174           Ctx, getAssumedDereferenceableBytes()));
4175     else
4176       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4177           Ctx, getAssumedDereferenceableBytes()));
4178   }
4179 
4180   /// See AbstractAttribute::getAsStr().
4181   const std::string getAsStr() const override {
4182     if (!getAssumedDereferenceableBytes())
4183       return "unknown-dereferenceable";
4184     return std::string("dereferenceable") +
4185            (isAssumedNonNull() ? "" : "_or_null") +
4186            (isAssumedGlobal() ? "_globally" : "") + "<" +
4187            std::to_string(getKnownDereferenceableBytes()) + "-" +
4188            std::to_string(getAssumedDereferenceableBytes()) + ">";
4189   }
4190 };
4191 
4192 /// Dereferenceable attribute for a floating value.
4193 struct AADereferenceableFloating : AADereferenceableImpl {
4194   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4195       : AADereferenceableImpl(IRP, A) {}
4196 
4197   /// See AbstractAttribute::updateImpl(...).
4198   ChangeStatus updateImpl(Attributor &A) override {
4199 
4200     bool Stripped;
4201     bool UsedAssumedInformation = false;
4202     SmallVector<AA::ValueAndContext> Values;
4203     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4204                                       AA::AnyScope, UsedAssumedInformation)) {
4205       Values.push_back({getAssociatedValue(), getCtxI()});
4206       Stripped = false;
4207     } else {
4208       Stripped = Values.size() != 1 ||
4209                  Values.front().getValue() != &getAssociatedValue();
4210     }
4211 
4212     const DataLayout &DL = A.getDataLayout();
4213     DerefState T;
4214 
4215     auto VisitValueCB = [&](const Value &V) -> bool {
4216       unsigned IdxWidth =
4217           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4218       APInt Offset(IdxWidth, 0);
4219       const Value *Base = stripAndAccumulateOffsets(
4220           A, *this, &V, DL, Offset, /* GetMinOffset */ false,
4221           /* AllowNonInbounds */ true);
4222 
4223       const auto &AA = A.getAAFor<AADereferenceable>(
4224           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4225       int64_t DerefBytes = 0;
4226       if (!Stripped && this == &AA) {
4227         // Use IR information if we did not strip anything.
4228         // TODO: track globally.
4229         bool CanBeNull, CanBeFreed;
4230         DerefBytes =
4231             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4232         T.GlobalState.indicatePessimisticFixpoint();
4233       } else {
4234         const DerefState &DS = AA.getState();
4235         DerefBytes = DS.DerefBytesState.getAssumed();
4236         T.GlobalState &= DS.GlobalState;
4237       }
4238 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices, as we would first have to deal with loops and with overflows
      // of the dereferenceable bytes.
4242       int64_t OffsetSExt = Offset.getSExtValue();
4243       if (OffsetSExt < 0)
4244         OffsetSExt = 0;
4245 
4246       T.takeAssumedDerefBytesMinimum(
4247           std::max(int64_t(0), DerefBytes - OffsetSExt));
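      // For example (hypothetical values): a base that is dereferenceable(16)
      // read at offset 4 leaves at most 16 - 4 = 12 dereferenceable bytes for
      // the offset pointer.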
4248 
4249       if (this == &AA) {
4250         if (!Stripped) {
4251           // If nothing was stripped IR information is all we got.
4252           T.takeKnownDerefBytesMaximum(
4253               std::max(int64_t(0), DerefBytes - OffsetSExt));
4254           T.indicatePessimisticFixpoint();
4255         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
4261           T.indicatePessimisticFixpoint();
4262         }
4263       }
4264 
4265       return T.isValidState();
4266     };
4267 
4268     for (const auto &VAC : Values)
4269       if (!VisitValueCB(*VAC.getValue()))
4270         return indicatePessimisticFixpoint();
4271 
4272     return clampStateAndIndicateChange(getState(), T);
4273   }
4274 
4275   /// See AbstractAttribute::trackStatistics()
4276   void trackStatistics() const override {
4277     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4278   }
4279 };
4280 
4281 /// Dereferenceable attribute for a return value.
4282 struct AADereferenceableReturned final
4283     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4284   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4285       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4286             IRP, A) {}
4287 
4288   /// See AbstractAttribute::trackStatistics()
4289   void trackStatistics() const override {
4290     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4291   }
4292 };
4293 
4294 /// Dereferenceable attribute for an argument
4295 struct AADereferenceableArgument final
4296     : AAArgumentFromCallSiteArguments<AADereferenceable,
4297                                       AADereferenceableImpl> {
4298   using Base =
4299       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4300   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4301       : Base(IRP, A) {}
4302 
4303   /// See AbstractAttribute::trackStatistics()
4304   void trackStatistics() const override {
4305     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4306   }
4307 };
4308 
4309 /// Dereferenceable attribute for a call site argument.
4310 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4311   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4312       : AADereferenceableFloating(IRP, A) {}
4313 
4314   /// See AbstractAttribute::trackStatistics()
4315   void trackStatistics() const override {
4316     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4317   }
4318 };
4319 
4320 /// Dereferenceable attribute deduction for a call site return value.
4321 struct AADereferenceableCallSiteReturned final
4322     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4323   using Base =
4324       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4325   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4326       : Base(IRP, A) {}
4327 
4328   /// See AbstractAttribute::trackStatistics()
4329   void trackStatistics() const override {
4330     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4331   }
4332 };
4333 } // namespace
4334 
4335 // ------------------------ Align Argument Attribute ------------------------
4336 
4337 namespace {
4338 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4339                                     Value &AssociatedValue, const Use *U,
4340                                     const Instruction *I, bool &TrackUse) {
4341   // We need to follow common pointer manipulation uses to the accesses they
4342   // feed into.
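  // For example (illustrative IR):
  //   %q = getelementptr inbounds i8, ptr %p, i64 8
  //   %v = load i64, ptr %q, align 8
  // Here the use of %p in the GEP is tracked so that the load's alignment can
  // inform the alignment of %p.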
4343   if (isa<CastInst>(I)) {
4344     // Follow all but ptr2int casts.
4345     TrackUse = !isa<PtrToIntInst>(I);
4346     return 0;
4347   }
4348   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4349     if (GEP->hasAllConstantIndices())
4350       TrackUse = true;
4351     return 0;
4352   }
4353 
4354   MaybeAlign MA;
4355   if (const auto *CB = dyn_cast<CallBase>(I)) {
4356     if (CB->isBundleOperand(U) || CB->isCallee(U))
4357       return 0;
4358 
4359     unsigned ArgNo = CB->getArgOperandNo(U);
4360     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4361     // As long as we only use known information there is no need to track
4362     // dependences here.
4363     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4364     MA = MaybeAlign(AlignAA.getKnownAlign());
4365   }
4366 
4367   const DataLayout &DL = A.getDataLayout();
4368   const Value *UseV = U->get();
4369   if (auto *SI = dyn_cast<StoreInst>(I)) {
4370     if (SI->getPointerOperand() == UseV)
4371       MA = SI->getAlign();
4372   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4373     if (LI->getPointerOperand() == UseV)
4374       MA = LI->getAlign();
4375   }
4376 
4377   if (!MA || *MA <= QueryingAA.getKnownAlign())
4378     return 0;
4379 
4380   unsigned Alignment = MA->value();
4381   int64_t Offset;
4382 
4383   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4384     if (Base == &AssociatedValue) {
4385       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4386       // So we can say that the maximum power of two which is a divisor of
4387       // gcd(Offset, Alignment) is an alignment.
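      // For example (hypothetical values): an access with align 8 at Base + 4
      // yields gcd(4, 8) = 4, so Base is at least 4-byte aligned.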
4388 
4389       uint32_t gcd =
4390           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4391       Alignment = llvm::PowerOf2Floor(gcd);
4392     }
4393   }
4394 
4395   return Alignment;
4396 }
4397 
4398 struct AAAlignImpl : AAAlign {
4399   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4400 
4401   /// See AbstractAttribute::initialize(...).
4402   void initialize(Attributor &A) override {
4403     SmallVector<Attribute, 4> Attrs;
4404     getAttrs({Attribute::Alignment}, Attrs);
4405     for (const Attribute &Attr : Attrs)
4406       takeKnownMaximum(Attr.getValueAsInt());
4407 
4408     Value &V = *getAssociatedValue().stripPointerCasts();
4409     takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4410 
4411     if (getIRPosition().isFnInterfaceKind() &&
4412         (!getAnchorScope() ||
4413          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4414       indicatePessimisticFixpoint();
4415       return;
4416     }
4417 
4418     if (Instruction *CtxI = getCtxI())
4419       followUsesInMBEC(*this, A, getState(), *CtxI);
4420   }
4421 
4422   /// See AbstractAttribute::manifest(...).
4423   ChangeStatus manifest(Attributor &A) override {
4424     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4425 
4426     // Check for users that allow alignment annotations.
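    // For example (illustrative): if we deduced align 16 for the pointer, a
    // user such as `store i32 0, ptr %p, align 4` below can be rewritten to
    // use `align 16`.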
4427     Value &AssociatedValue = getAssociatedValue();
4428     for (const Use &U : AssociatedValue.uses()) {
4429       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4430         if (SI->getPointerOperand() == &AssociatedValue)
4431           if (SI->getAlign() < getAssumedAlign()) {
4432             STATS_DECLTRACK(AAAlign, Store,
4433                             "Number of times alignment added to a store");
4434             SI->setAlignment(getAssumedAlign());
4435             LoadStoreChanged = ChangeStatus::CHANGED;
4436           }
4437       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4438         if (LI->getPointerOperand() == &AssociatedValue)
4439           if (LI->getAlign() < getAssumedAlign()) {
4440             LI->setAlignment(getAssumedAlign());
4441             STATS_DECLTRACK(AAAlign, Load,
4442                             "Number of times alignment added to a load");
4443             LoadStoreChanged = ChangeStatus::CHANGED;
4444           }
4445       }
4446     }
4447 
4448     ChangeStatus Changed = AAAlign::manifest(A);
4449 
4450     Align InheritAlign =
4451         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4452     if (InheritAlign >= getAssumedAlign())
4453       return LoadStoreChanged;
4454     return Changed | LoadStoreChanged;
4455   }
4456 
4457   // TODO: Provide a helper to determine the implied ABI alignment and check in
4458   //       the existing manifest method and a new one for AAAlignImpl that value
4459   //       to avoid making the alignment explicit if it did not improve.
4460 
4461   /// See AbstractAttribute::getDeducedAttributes
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
4465     if (getAssumedAlign() > 1)
4466       Attrs.emplace_back(
4467           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4468   }
4469 
4470   /// See followUsesInMBEC
4471   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4472                        AAAlign::StateType &State) {
4473     bool TrackUse = false;
4474 
4475     unsigned int KnownAlign =
4476         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4477     State.takeKnownMaximum(KnownAlign);
4478 
4479     return TrackUse;
4480   }
4481 
4482   /// See AbstractAttribute::getAsStr().
4483   const std::string getAsStr() const override {
4484     return "align<" + std::to_string(getKnownAlign().value()) + "-" +
4485            std::to_string(getAssumedAlign().value()) + ">";
4486   }
4487 };
4488 
4489 /// Align attribute for a floating value.
4490 struct AAAlignFloating : AAAlignImpl {
4491   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4492 
4493   /// See AbstractAttribute::updateImpl(...).
4494   ChangeStatus updateImpl(Attributor &A) override {
4495     const DataLayout &DL = A.getDataLayout();
4496 
4497     bool Stripped;
4498     bool UsedAssumedInformation = false;
4499     SmallVector<AA::ValueAndContext> Values;
4500     if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
4501                                       AA::AnyScope, UsedAssumedInformation)) {
4502       Values.push_back({getAssociatedValue(), getCtxI()});
4503       Stripped = false;
4504     } else {
4505       Stripped = Values.size() != 1 ||
4506                  Values.front().getValue() != &getAssociatedValue();
4507     }
4508 
4509     StateType T;
4510     auto VisitValueCB = [&](Value &V) -> bool {
4511       if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
4512         return true;
4513       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4514                                            DepClassTy::REQUIRED);
4515       if (!Stripped && this == &AA) {
4516         int64_t Offset;
4517         unsigned Alignment = 1;
4518         if (const Value *Base =
4519                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4520           // TODO: Use AAAlign for the base too.
4521           Align PA = Base->getPointerAlignment(DL);
4522           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4523           // So we can say that the maximum power of two which is a divisor of
4524           // gcd(Offset, Alignment) is an alignment.
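          // For example (hypothetical values): a base with align 16 at
          // offset 20 yields gcd(20, 16) = 4, so the value is at least
          // 4-byte aligned.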
4525 
4526           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4527                                                uint32_t(PA.value()));
4528           Alignment = llvm::PowerOf2Floor(gcd);
4529         } else {
4530           Alignment = V.getPointerAlignment(DL).value();
4531         }
4532         // Use only IR information if we did not strip anything.
4533         T.takeKnownMaximum(Alignment);
4534         T.indicatePessimisticFixpoint();
4535       } else {
4536         // Use abstract attribute information.
4537         const AAAlign::StateType &DS = AA.getState();
4538         T ^= DS;
4539       }
4540       return T.isValidState();
4541     };
4542 
4543     for (const auto &VAC : Values) {
4544       if (!VisitValueCB(*VAC.getValue()))
4545         return indicatePessimisticFixpoint();
4546     }
4547 
    //  TODO: If we know we visited all incoming values, thus none are assumed
    //  dead, we can take the known information from the state T.
4550     return clampStateAndIndicateChange(getState(), T);
4551   }
4552 
4553   /// See AbstractAttribute::trackStatistics()
4554   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4555 };
4556 
4557 /// Align attribute for function return value.
4558 struct AAAlignReturned final
4559     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4560   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4561   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4562 
4563   /// See AbstractAttribute::initialize(...).
4564   void initialize(Attributor &A) override {
4565     Base::initialize(A);
4566     Function *F = getAssociatedFunction();
4567     if (!F || F->isDeclaration())
4568       indicatePessimisticFixpoint();
4569   }
4570 
4571   /// See AbstractAttribute::trackStatistics()
4572   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4573 };
4574 
4575 /// Align attribute for function argument.
4576 struct AAAlignArgument final
4577     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4578   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4579   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4580 
4581   /// See AbstractAttribute::manifest(...).
4582   ChangeStatus manifest(Attributor &A) override {
4583     // If the associated argument is involved in a must-tail call we give up
4584     // because we would need to keep the argument alignments of caller and
4585     // callee in-sync. Just does not seem worth the trouble right now.
4586     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4587       return ChangeStatus::UNCHANGED;
4588     return Base::manifest(A);
4589   }
4590 
4591   /// See AbstractAttribute::trackStatistics()
4592   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4593 };
4594 
4595 struct AAAlignCallSiteArgument final : AAAlignFloating {
4596   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4597       : AAAlignFloating(IRP, A) {}
4598 
4599   /// See AbstractAttribute::manifest(...).
4600   ChangeStatus manifest(Attributor &A) override {
4601     // If the associated argument is involved in a must-tail call we give up
4602     // because we would need to keep the argument alignments of caller and
4603     // callee in-sync. Just does not seem worth the trouble right now.
4604     if (Argument *Arg = getAssociatedArgument())
4605       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4606         return ChangeStatus::UNCHANGED;
4607     ChangeStatus Changed = AAAlignImpl::manifest(A);
4608     Align InheritAlign =
4609         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4610     if (InheritAlign >= getAssumedAlign())
4611       Changed = ChangeStatus::UNCHANGED;
4612     return Changed;
4613   }
4614 
4615   /// See AbstractAttribute::updateImpl(Attributor &A).
4616   ChangeStatus updateImpl(Attributor &A) override {
4617     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4618     if (Argument *Arg = getAssociatedArgument()) {
4619       // We only take known information from the argument
4620       // so we do not need to track a dependence.
4621       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4622           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4623       takeKnownMaximum(ArgAlignAA.getKnownAlign().value());
4624     }
4625     return Changed;
4626   }
4627 
4628   /// See AbstractAttribute::trackStatistics()
4629   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4630 };
4631 
4632 /// Align attribute deduction for a call site return value.
4633 struct AAAlignCallSiteReturned final
4634     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4635   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4636   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4637       : Base(IRP, A) {}
4638 
4639   /// See AbstractAttribute::initialize(...).
4640   void initialize(Attributor &A) override {
4641     Base::initialize(A);
4642     Function *F = getAssociatedFunction();
4643     if (!F || F->isDeclaration())
4644       indicatePessimisticFixpoint();
4645   }
4646 
4647   /// See AbstractAttribute::trackStatistics()
4648   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4649 };
4650 } // namespace
4651 
4652 /// ------------------ Function No-Return Attribute ----------------------------
4653 namespace {
4654 struct AANoReturnImpl : public AANoReturn {
4655   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4656 
4657   /// See AbstractAttribute::initialize(...).
4658   void initialize(Attributor &A) override {
4659     AANoReturn::initialize(A);
4660     Function *F = getAssociatedFunction();
4661     if (!F || F->isDeclaration())
4662       indicatePessimisticFixpoint();
4663   }
4664 
4665   /// See AbstractAttribute::getAsStr().
4666   const std::string getAsStr() const override {
4667     return getAssumed() ? "noreturn" : "may-return";
4668   }
4669 
4670   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
4672     auto CheckForNoReturn = [](Instruction &) { return false; };
4673     bool UsedAssumedInformation = false;
4674     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4675                                    {(unsigned)Instruction::Ret},
4676                                    UsedAssumedInformation))
4677       return indicatePessimisticFixpoint();
4678     return ChangeStatus::UNCHANGED;
4679   }
4680 };
4681 
4682 struct AANoReturnFunction final : AANoReturnImpl {
4683   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4684       : AANoReturnImpl(IRP, A) {}
4685 
4686   /// See AbstractAttribute::trackStatistics()
4687   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4688 };
4689 
/// NoReturn attribute deduction for a call site.
4691 struct AANoReturnCallSite final : AANoReturnImpl {
4692   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4693       : AANoReturnImpl(IRP, A) {}
4694 
4695   /// See AbstractAttribute::initialize(...).
4696   void initialize(Attributor &A) override {
4697     AANoReturnImpl::initialize(A);
4698     if (Function *F = getAssociatedFunction()) {
4699       const IRPosition &FnPos = IRPosition::function(*F);
4700       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4701       if (!FnAA.isAssumedNoReturn())
4702         indicatePessimisticFixpoint();
4703     }
4704   }
4705 
4706   /// See AbstractAttribute::updateImpl(...).
4707   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
4712     Function *F = getAssociatedFunction();
4713     const IRPosition &FnPos = IRPosition::function(*F);
4714     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4715     return clampStateAndIndicateChange(getState(), FnAA.getState());
4716   }
4717 
4718   /// See AbstractAttribute::trackStatistics()
4719   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4720 };
4721 } // namespace
4722 
4723 /// ----------------------- Instance Info ---------------------------------
4724 
4725 namespace {
/// A class to hold the state of instance info attributes.
4727 struct AAInstanceInfoImpl : public AAInstanceInfo {
4728   AAInstanceInfoImpl(const IRPosition &IRP, Attributor &A)
4729       : AAInstanceInfo(IRP, A) {}
4730 
4731   /// See AbstractAttribute::initialize(...).
4732   void initialize(Attributor &A) override {
4733     Value &V = getAssociatedValue();
4734     if (auto *C = dyn_cast<Constant>(&V)) {
4735       if (C->isThreadDependent())
4736         indicatePessimisticFixpoint();
4737       else
4738         indicateOptimisticFixpoint();
4739       return;
4740     }
4741     if (auto *CB = dyn_cast<CallBase>(&V))
4742       if (CB->arg_size() == 0 && !CB->mayHaveSideEffects() &&
4743           !CB->mayReadFromMemory()) {
4744         indicateOptimisticFixpoint();
4745         return;
4746       }
4747   }
4748 
4749   /// See AbstractAttribute::updateImpl(...).
4750   ChangeStatus updateImpl(Attributor &A) override {
4751     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4752 
4753     Value &V = getAssociatedValue();
4754     const Function *Scope = nullptr;
4755     if (auto *I = dyn_cast<Instruction>(&V))
4756       Scope = I->getFunction();
4757     if (auto *A = dyn_cast<Argument>(&V)) {
4758       Scope = A->getParent();
4759       if (!Scope->hasLocalLinkage())
4760         return Changed;
4761     }
4762     if (!Scope)
4763       return indicateOptimisticFixpoint();
4764 
4765     auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
4766         *this, IRPosition::function(*Scope), DepClassTy::OPTIONAL);
4767     if (NoRecurseAA.isAssumedNoRecurse())
4768       return Changed;
4769 
4770     auto UsePred = [&](const Use &U, bool &Follow) {
4771       const Instruction *UserI = dyn_cast<Instruction>(U.getUser());
4772       if (!UserI || isa<GetElementPtrInst>(UserI) || isa<CastInst>(UserI) ||
4773           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4774         Follow = true;
4775         return true;
4776       }
4777       if (isa<LoadInst>(UserI) || isa<CmpInst>(UserI) ||
4778           (isa<StoreInst>(UserI) &&
4779            cast<StoreInst>(UserI)->getValueOperand() != U.get()))
4780         return true;
4781       if (auto *CB = dyn_cast<CallBase>(UserI)) {
        // This check does not guarantee uniqueness, but for now it ensures we
        // cannot end up with two versions of \p U thinking it was one.
4784         if (!CB->getCalledFunction() ||
4785             !CB->getCalledFunction()->hasLocalLinkage())
4786           return true;
4787         if (!CB->isArgOperand(&U))
4788           return false;
4789         const auto &ArgInstanceInfoAA = A.getAAFor<AAInstanceInfo>(
4790             *this, IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U)),
4791             DepClassTy::OPTIONAL);
4792         if (!ArgInstanceInfoAA.isAssumedUniqueForAnalysis())
4793           return false;
4794         // If this call base might reach the scope again we might forward the
4795         // argument back here. This is very conservative.
4796         if (AA::isPotentiallyReachable(A, *CB, *Scope, *this, nullptr))
4797           return false;
4798         return true;
4799       }
4800       return false;
4801     };
4802 
4803     auto EquivalentUseCB = [&](const Use &OldU, const Use &NewU) {
4804       if (auto *SI = dyn_cast<StoreInst>(OldU.getUser())) {
4805         auto *Ptr = SI->getPointerOperand()->stripPointerCasts();
4806         if (isa<AllocaInst>(Ptr) && AA::isDynamicallyUnique(A, *this, *Ptr))
4807           return true;
4808         auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(
4809             *SI->getFunction());
4810         if (isAllocationFn(Ptr, TLI) && AA::isDynamicallyUnique(A, *this, *Ptr))
4811           return true;
4812       }
4813       return false;
4814     };
4815 
4816     if (!A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ true,
4817                            DepClassTy::OPTIONAL,
4818                            /* IgnoreDroppableUses */ true, EquivalentUseCB))
4819       return indicatePessimisticFixpoint();
4820 
4821     return Changed;
4822   }
4823 
4824   /// See AbstractState::getAsStr().
4825   const std::string getAsStr() const override {
4826     return isAssumedUniqueForAnalysis() ? "<unique [fAa]>" : "<unknown>";
4827   }
4828 
4829   /// See AbstractAttribute::trackStatistics()
4830   void trackStatistics() const override {}
4831 };
4832 
4833 /// InstanceInfo attribute for floating values.
4834 struct AAInstanceInfoFloating : AAInstanceInfoImpl {
4835   AAInstanceInfoFloating(const IRPosition &IRP, Attributor &A)
4836       : AAInstanceInfoImpl(IRP, A) {}
4837 };
4838 
/// InstanceInfo attribute for function arguments.
4840 struct AAInstanceInfoArgument final : AAInstanceInfoFloating {
4841   AAInstanceInfoArgument(const IRPosition &IRP, Attributor &A)
4842       : AAInstanceInfoFloating(IRP, A) {}
4843 };
4844 
4845 /// InstanceInfo attribute for call site arguments.
4846 struct AAInstanceInfoCallSiteArgument final : AAInstanceInfoImpl {
4847   AAInstanceInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
4848       : AAInstanceInfoImpl(IRP, A) {}
4849 
4850   /// See AbstractAttribute::updateImpl(...).
4851   ChangeStatus updateImpl(Attributor &A) override {
4852     // TODO: Once we have call site specific value information we can provide
4853     //       call site specific liveness information and then it makes
4854     //       sense to specialize attributes for call sites arguments instead of
4855     //       redirecting requests to the callee argument.
4856     Argument *Arg = getAssociatedArgument();
4857     if (!Arg)
4858       return indicatePessimisticFixpoint();
4859     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4860     auto &ArgAA =
4861         A.getAAFor<AAInstanceInfo>(*this, ArgPos, DepClassTy::REQUIRED);
4862     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4863   }
4864 };
4865 
4866 /// InstanceInfo attribute for function return value.
4867 struct AAInstanceInfoReturned final : AAInstanceInfoImpl {
4868   AAInstanceInfoReturned(const IRPosition &IRP, Attributor &A)
4869       : AAInstanceInfoImpl(IRP, A) {
4870     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4871   }
4872 
4873   /// See AbstractAttribute::initialize(...).
4874   void initialize(Attributor &A) override {
4875     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4876   }
4877 
4878   /// See AbstractAttribute::updateImpl(...).
4879   ChangeStatus updateImpl(Attributor &A) override {
4880     llvm_unreachable("InstanceInfo is not applicable to function returns!");
4881   }
4882 };
4883 
4884 /// InstanceInfo attribute deduction for a call site return value.
4885 struct AAInstanceInfoCallSiteReturned final : AAInstanceInfoFloating {
4886   AAInstanceInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
4887       : AAInstanceInfoFloating(IRP, A) {}
4888 };
4889 } // namespace
4890 
4891 /// ----------------------- Variable Capturing ---------------------------------
4892 
4893 namespace {
/// A class to hold the state for no-capture attributes.
4895 struct AANoCaptureImpl : public AANoCapture {
4896   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4897 
4898   /// See AbstractAttribute::initialize(...).
4899   void initialize(Attributor &A) override {
4900     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4901       indicateOptimisticFixpoint();
4902       return;
4903     }
4904     Function *AnchorScope = getAnchorScope();
4905     if (isFnInterfaceKind() &&
4906         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4907       indicatePessimisticFixpoint();
4908       return;
4909     }
4910 
4911     // You cannot "capture" null in the default address space.
4912     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4913         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4914       indicateOptimisticFixpoint();
4915       return;
4916     }
4917 
4918     const Function *F =
4919         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4920 
4921     // Check what state the associated function can actually capture.
4922     if (F)
4923       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4924     else
4925       indicatePessimisticFixpoint();
4926   }
4927 
4928   /// See AbstractAttribute::updateImpl(...).
4929   ChangeStatus updateImpl(Attributor &A) override;
4930 
  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
4935     if (!isAssumedNoCaptureMaybeReturned())
4936       return;
4937 
4938     if (isArgumentPosition()) {
4939       if (isAssumedNoCapture())
4940         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4941       else if (ManifestInternal)
4942         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4943     }
4944   }
4945 
4946   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4947   /// depending on the ability of the function associated with \p IRP to capture
4948   /// state in memory and through "returning/throwing", respectively.
4949   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4950                                                    const Function &F,
4951                                                    BitIntegerState &State) {
4952     // TODO: Once we have memory behavior attributes we should use them here.
4953 
4954     // If we know we cannot communicate or write to memory, we do not care about
4955     // ptr2int anymore.
4956     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4957         F.getReturnType()->isVoidTy()) {
4958       State.addKnownBits(NO_CAPTURE);
4959       return;
4960     }
4961 
4962     // A function cannot capture state in memory if it only reads memory, it can
4963     // however return/throw state and the state might be influenced by the
4964     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4965     if (F.onlyReadsMemory())
4966       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4967 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
4970     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4971       State.addKnownBits(NOT_CAPTURED_IN_RET);
4972 
4973     // Check existing "returned" attributes.
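    // For example (illustrative IR): given
    //   declare ptr @f(ptr returned %a, ptr %b) nounwind
    // the returned value is always %a, so %b cannot be captured through the
    // return; if @f additionally only reads memory, %b is known not to be
    // captured at all.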
4974     int ArgNo = IRP.getCalleeArgNo();
4975     if (F.doesNotThrow() && ArgNo >= 0) {
4976       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4977         if (F.hasParamAttribute(u, Attribute::Returned)) {
4978           if (u == unsigned(ArgNo))
4979             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4980           else if (F.onlyReadsMemory())
4981             State.addKnownBits(NO_CAPTURE);
4982           else
4983             State.addKnownBits(NOT_CAPTURED_IN_RET);
4984           break;
4985         }
4986     }
4987   }
4988 
4989   /// See AbstractState::getAsStr().
4990   const std::string getAsStr() const override {
4991     if (isKnownNoCapture())
4992       return "known not-captured";
4993     if (isAssumedNoCapture())
4994       return "assumed not-captured";
4995     if (isKnownNoCaptureMaybeReturned())
4996       return "known not-captured-maybe-returned";
4997     if (isAssumedNoCaptureMaybeReturned())
4998       return "assumed not-captured-maybe-returned";
4999     return "assumed-captured";
5000   }
5001 
5002   /// Check the use \p U and update \p State accordingly. Return true if we
5003   /// should continue to update the state.
5004   bool checkUse(Attributor &A, AANoCapture::StateType &State, const Use &U,
5005                 bool &Follow) {
5006     Instruction *UInst = cast<Instruction>(U.getUser());
5007     LLVM_DEBUG(dbgs() << "[AANoCapture] Check use: " << *U.get() << " in "
5008                       << *UInst << "\n");
5009 
    // Deal with ptr2int conservatively and assume the pointer is captured.
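    // For example (illustrative): `%i = ptrtoint ptr %p to i64` lets the
    // pointer escape as an integer, so we have to assume it can be captured
    // in memory, in integer state, and through the return.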
5011     if (isa<PtrToIntInst>(UInst)) {
5012       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
5013       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5014                           /* Return */ true);
5015     }
5016 
    // For stores we already checked if we can follow them; if they make it
    // here we give up.
5019     if (isa<StoreInst>(UInst))
5020       return isCapturedIn(State, /* Memory */ true, /* Integer */ false,
5021                           /* Return */ false);
5022 
5023     // Explicitly catch return instructions.
5024     if (isa<ReturnInst>(UInst)) {
5025       if (UInst->getFunction() == getAnchorScope())
5026         return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5027                             /* Return */ true);
5028       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5029                           /* Return */ true);
5030     }
5031 
5032     // For now we only use special logic for call sites. However, the tracker
5033     // itself knows about a lot of other non-capturing cases already.
5034     auto *CB = dyn_cast<CallBase>(UInst);
5035     if (!CB || !CB->isArgOperand(&U))
5036       return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5037                           /* Return */ true);
5038 
5039     unsigned ArgNo = CB->getArgOperandNo(&U);
5040     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
5043     auto &ArgNoCaptureAA =
5044         A.getAAFor<AANoCapture>(*this, CSArgPos, DepClassTy::REQUIRED);
5045     if (ArgNoCaptureAA.isAssumedNoCapture())
5046       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5047                           /* Return */ false);
5048     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5049       Follow = true;
5050       return isCapturedIn(State, /* Memory */ false, /* Integer */ false,
5051                           /* Return */ false);
5052     }
5053 
5054     // Lastly, we could not find a reason no-capture can be assumed so we don't.
5055     return isCapturedIn(State, /* Memory */ true, /* Integer */ true,
5056                         /* Return */ true);
5057   }
5058 
5059   /// Update \p State according to \p CapturedInMem, \p CapturedInInt, and
5060   /// \p CapturedInRet, then return true if we should continue updating the
5061   /// state.
5062   static bool isCapturedIn(AANoCapture::StateType &State, bool CapturedInMem,
5063                            bool CapturedInInt, bool CapturedInRet) {
5064     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
5065                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
5066     if (CapturedInMem)
5067       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
5068     if (CapturedInInt)
5069       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
5070     if (CapturedInRet)
5071       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
5072     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
5073   }
5074 };
5075 
5076 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
5077   const IRPosition &IRP = getIRPosition();
5078   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
5079                                   : &IRP.getAssociatedValue();
5080   if (!V)
5081     return indicatePessimisticFixpoint();
5082 
5083   const Function *F =
5084       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
5085   assert(F && "Expected a function!");
5086   const IRPosition &FnPos = IRPosition::function(*F);
5087 
5088   AANoCapture::StateType T;
5089 
5090   // Readonly means we cannot capture through memory.
5091   bool IsKnown;
5092   if (AA::isAssumedReadOnly(A, FnPos, *this, IsKnown)) {
5093     T.addKnownBits(NOT_CAPTURED_IN_MEM);
5094     if (IsKnown)
5095       addKnownBits(NOT_CAPTURED_IN_MEM);
5096   }
5097 
  // Make sure all returned values are different from the underlying value.
5099   // TODO: we could do this in a more sophisticated way inside
5100   //       AAReturnedValues, e.g., track all values that escape through returns
5101   //       directly somehow.
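  // For example (illustrative IR): in
  //   define ptr @f(ptr %a, ptr %b) { ret ptr %a }
  // the only returned value is the argument %a, so when we reason about %b no
  // returned value can be %b and %b is not captured through the return.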
5102   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
5103     if (!RVAA.getState().isValidState())
5104       return false;
5105     bool SeenConstant = false;
5106     for (auto &It : RVAA.returned_values()) {
5107       if (isa<Constant>(It.first)) {
5108         if (SeenConstant)
5109           return false;
5110         SeenConstant = true;
5111       } else if (!isa<Argument>(It.first) ||
5112                  It.first == getAssociatedArgument())
5113         return false;
5114     }
5115     return true;
5116   };
5117 
5118   const auto &NoUnwindAA =
5119       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
5120   if (NoUnwindAA.isAssumedNoUnwind()) {
5121     bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
5127     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
5128       T.addKnownBits(NOT_CAPTURED_IN_RET);
5129       if (T.isKnown(NOT_CAPTURED_IN_MEM))
5130         return ChangeStatus::UNCHANGED;
5131       if (NoUnwindAA.isKnownNoUnwind() &&
5132           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
5133         addKnownBits(NOT_CAPTURED_IN_RET);
5134         if (isKnown(NOT_CAPTURED_IN_MEM))
5135           return indicateOptimisticFixpoint();
5136       }
5137     }
5138   }
5139 
5140   auto IsDereferenceableOrNull = [&](Value *O, const DataLayout &DL) {
5141     const auto &DerefAA = A.getAAFor<AADereferenceable>(
5142         *this, IRPosition::value(*O), DepClassTy::OPTIONAL);
5143     return DerefAA.getAssumedDereferenceableBytes();
5144   };
5145 
5146   auto UseCheck = [&](const Use &U, bool &Follow) -> bool {
5147     switch (DetermineUseCaptureKind(U, IsDereferenceableOrNull)) {
5148     case UseCaptureKind::NO_CAPTURE:
5149       return true;
5150     case UseCaptureKind::MAY_CAPTURE:
5151       return checkUse(A, T, U, Follow);
5152     case UseCaptureKind::PASSTHROUGH:
5153       Follow = true;
5154       return true;
5155     }
5156     llvm_unreachable("Unexpected use capture kind!");
5157   };
5158 
5159   if (!A.checkForAllUses(UseCheck, *this, *V))
5160     return indicatePessimisticFixpoint();
5161 
5162   AANoCapture::StateType &S = getState();
5163   auto Assumed = S.getAssumed();
5164   S.intersectAssumedBits(T.getAssumed());
5165   if (!isAssumedNoCaptureMaybeReturned())
5166     return indicatePessimisticFixpoint();
5167   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5168                                    : ChangeStatus::CHANGED;
5169 }
5170 
5171 /// NoCapture attribute for function arguments.
5172 struct AANoCaptureArgument final : AANoCaptureImpl {
5173   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5174       : AANoCaptureImpl(IRP, A) {}
5175 
5176   /// See AbstractAttribute::trackStatistics()
5177   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5178 };
5179 
5180 /// NoCapture attribute for call site arguments.
5181 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5182   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5183       : AANoCaptureImpl(IRP, A) {}
5184 
5185   /// See AbstractAttribute::initialize(...).
5186   void initialize(Attributor &A) override {
5187     if (Argument *Arg = getAssociatedArgument())
5188       if (Arg->hasByValAttr())
5189         indicateOptimisticFixpoint();
5190     AANoCaptureImpl::initialize(A);
5191   }
5192 
5193   /// See AbstractAttribute::updateImpl(...).
5194   ChangeStatus updateImpl(Attributor &A) override {
5195     // TODO: Once we have call site specific value information we can provide
5196     //       call site specific liveness information and then it makes
5197     //       sense to specialize attributes for call sites arguments instead of
5198     //       redirecting requests to the callee argument.
5199     Argument *Arg = getAssociatedArgument();
5200     if (!Arg)
5201       return indicatePessimisticFixpoint();
5202     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5203     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5204     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5205   }
5206 
5207   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
5209 };
5210 
5211 /// NoCapture attribute for floating values.
5212 struct AANoCaptureFloating final : AANoCaptureImpl {
5213   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5214       : AANoCaptureImpl(IRP, A) {}
5215 
5216   /// See AbstractAttribute::trackStatistics()
5217   void trackStatistics() const override {
5218     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5219   }
5220 };
5221 
5222 /// NoCapture attribute for function return value.
5223 struct AANoCaptureReturned final : AANoCaptureImpl {
5224   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5225       : AANoCaptureImpl(IRP, A) {
5226     llvm_unreachable("NoCapture is not applicable to function returns!");
5227   }
5228 
5229   /// See AbstractAttribute::initialize(...).
5230   void initialize(Attributor &A) override {
5231     llvm_unreachable("NoCapture is not applicable to function returns!");
5232   }
5233 
5234   /// See AbstractAttribute::updateImpl(...).
5235   ChangeStatus updateImpl(Attributor &A) override {
5236     llvm_unreachable("NoCapture is not applicable to function returns!");
5237   }
5238 
5239   /// See AbstractAttribute::trackStatistics()
5240   void trackStatistics() const override {}
5241 };
5242 
5243 /// NoCapture attribute deduction for a call site return value.
5244 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5245   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5246       : AANoCaptureImpl(IRP, A) {}
5247 
5248   /// See AbstractAttribute::initialize(...).
5249   void initialize(Attributor &A) override {
5250     const Function *F = getAnchorScope();
5251     // Check what state the associated function can actually capture.
5252     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5253   }
5254 
5255   /// See AbstractAttribute::trackStatistics()
5256   void trackStatistics() const override {
5257     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5258   }
5259 };
5260 } // namespace
5261 
5262 /// ------------------ Value Simplify Attribute ----------------------------
5263 
5264 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
  // FIXME: Add typecast support.
5266   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5267       SimplifiedAssociatedValue, Other, Ty);
5268   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5269     return false;
5270 
5271   LLVM_DEBUG({
5272     if (SimplifiedAssociatedValue)
5273       dbgs() << "[ValueSimplify] is assumed to be "
5274              << **SimplifiedAssociatedValue << "\n";
5275     else
5276       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5277   });
5278   return true;
5279 }
5280 
5281 namespace {
5282 struct AAValueSimplifyImpl : AAValueSimplify {
5283   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5284       : AAValueSimplify(IRP, A) {}
5285 
5286   /// See AbstractAttribute::initialize(...).
5287   void initialize(Attributor &A) override {
5288     if (getAssociatedValue().getType()->isVoidTy())
5289       indicatePessimisticFixpoint();
5290     if (A.hasSimplificationCallback(getIRPosition()))
5291       indicatePessimisticFixpoint();
5292   }
5293 
5294   /// See AbstractAttribute::getAsStr().
5295   const std::string getAsStr() const override {
5296     LLVM_DEBUG({
5297       dbgs() << "SAV: " << (bool)SimplifiedAssociatedValue << " ";
5298       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5299         dbgs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5300     });
5301     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5302                           : "not-simple";
5303   }
5304 
5305   /// See AbstractAttribute::trackStatistics()
5306   void trackStatistics() const override {}
5307 
5308   /// See AAValueSimplify::getAssumedSimplifiedValue()
5309   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5310     return SimplifiedAssociatedValue;
5311   }
5312 
  /// Ensure the return value is \p V with type \p Ty, if that is not possible
  /// return nullptr. If \p Check is true we will only verify such an operation
  /// would succeed and return a non-nullptr value if that is the case. No IR
  /// is generated or modified.
5317   static Value *ensureType(Attributor &A, Value &V, Type &Ty, Instruction *CtxI,
5318                            bool Check) {
5319     if (auto *TypedV = AA::getWithType(V, Ty))
5320       return TypedV;
5321     if (CtxI && V.getType()->canLosslesslyBitCastTo(&Ty))
5322       return Check ? &V
5323                    : BitCastInst::CreatePointerBitCastOrAddrSpaceCast(&V, &Ty,
5324                                                                       "", CtxI);
5325     return nullptr;
5326   }
5327 
  /// Reproduce \p I with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
5332   static Value *reproduceInst(Attributor &A,
5333                               const AbstractAttribute &QueryingAA,
5334                               Instruction &I, Type &Ty, Instruction *CtxI,
5335                               bool Check, ValueToValueMapTy &VMap) {
5336     assert(CtxI && "Cannot reproduce an instruction without context!");
5337     if (Check && (I.mayReadFromMemory() ||
5338                   !isSafeToSpeculativelyExecute(&I, CtxI, /* DT */ nullptr,
5339                                                 /* TLI */ nullptr)))
5340       return nullptr;
5341     for (Value *Op : I.operands()) {
5342       Value *NewOp = reproduceValue(A, QueryingAA, *Op, Ty, CtxI, Check, VMap);
5343       if (!NewOp) {
5344         assert(Check && "Manifest of new value unexpectedly failed!");
5345         return nullptr;
5346       }
5347       if (!Check)
5348         VMap[Op] = NewOp;
5349     }
5350     if (Check)
5351       return &I;
5352 
5353     Instruction *CloneI = I.clone();
5354     // TODO: Try to salvage debug information here.
5355     CloneI->setDebugLoc(DebugLoc());
5356     VMap[&I] = CloneI;
5357     CloneI->insertBefore(CtxI);
5358     RemapInstruction(CloneI, VMap);
5359     return CloneI;
5360   }
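  // Sketch of the intended two-phase use: for `%add = add i32 %a, 1`, a first
  // call with \p Check set only verifies the instruction is speculatable and
  // that all operands can be reproduced; a second call with \p Check unset
  // then clones the instruction, remaps its operands via \p VMap, and inserts
  // the clone before \p CtxI.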
5361 
  /// Reproduce \p V with type \p Ty or return nullptr if that is not possible.
  /// If \p Check is true we will only verify such an operation would succeed
  /// and return a non-nullptr value if that is the case. No IR is generated or
  /// modified.
5366   static Value *reproduceValue(Attributor &A,
5367                                const AbstractAttribute &QueryingAA, Value &V,
5368                                Type &Ty, Instruction *CtxI, bool Check,
5369                                ValueToValueMapTy &VMap) {
5370     if (const auto &NewV = VMap.lookup(&V))
5371       return NewV;
5372     bool UsedAssumedInformation = false;
5373     Optional<Value *> SimpleV = A.getAssumedSimplified(
5374         V, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5375     if (!SimpleV.hasValue())
5376       return PoisonValue::get(&Ty);
5377     Value *EffectiveV = &V;
5378     if (SimpleV.getValue())
5379       EffectiveV = SimpleV.getValue();
5380     if (auto *C = dyn_cast<Constant>(EffectiveV))
5381       return C;
5382     if (CtxI && AA::isValidAtPosition(AA::ValueAndContext(*EffectiveV, *CtxI),
5383                                       A.getInfoCache()))
5384       return ensureType(A, *EffectiveV, Ty, CtxI, Check);
5385     if (auto *I = dyn_cast<Instruction>(EffectiveV))
5386       if (Value *NewV = reproduceInst(A, QueryingAA, *I, Ty, CtxI, Check, VMap))
5387         return ensureType(A, *NewV, Ty, CtxI, Check);
5388     return nullptr;
5389   }
5390 
5391   /// Return a value we can use as replacement for the associated one, or
5392   /// nullptr if we don't have one that makes sense.
5393   Value *manifestReplacementValue(Attributor &A, Instruction *CtxI) const {
5394     Value *NewV = SimplifiedAssociatedValue
5395                       ? SimplifiedAssociatedValue.getValue()
5396                       : UndefValue::get(getAssociatedType());
5397     if (NewV && NewV != &getAssociatedValue()) {
5398       ValueToValueMapTy VMap;
      // First verify we can reproduce the value with the required type at the
5400       // context location before we actually start modifying the IR.
5401       if (reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5402                          /* CheckOnly */ true, VMap))
5403         return reproduceValue(A, *this, *NewV, *getAssociatedType(), CtxI,
5404                               /* CheckOnly */ false, VMap);
5405     }
5406     return nullptr;
5407   }
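  // Note on the pattern above: the CheckOnly pass guarantees that the
  // manifesting pass cannot fail halfway through, so we never leave partially
  // materialized IR behind.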
5408 
  /// Helper function for querying AAValueSimplify and updating the candidate.
5410   /// \param IRP The value position we are trying to unify with SimplifiedValue
5411   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5412                       const IRPosition &IRP, bool Simplify = true) {
5413     bool UsedAssumedInformation = false;
5414     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5415     if (Simplify)
5416       QueryingValueSimplified = A.getAssumedSimplified(
5417           IRP, QueryingAA, UsedAssumedInformation, AA::Interprocedural);
5418     return unionAssumed(QueryingValueSimplified);
5419   }
5420 
  /// Return whether a candidate was found.
5422   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5423     if (!getAssociatedValue().getType()->isIntegerTy())
5424       return false;
5425 
5426     // This will also pass the call base context.
5427     const auto &AA =
5428         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5429 
5430     Optional<Constant *> COpt = AA.getAssumedConstant(A);
5431 
5432     if (!COpt) {
5433       SimplifiedAssociatedValue = llvm::None;
5434       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5435       return true;
5436     }
5437     if (auto *C = *COpt) {
5438       SimplifiedAssociatedValue = C;
5439       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5440       return true;
5441     }
5442     return false;
5443   }
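  // For example, if AAValueConstantRange has narrowed an integer position to
  // a single value, getAssumedConstant(A) yields that constant and we adopt
  // it as the simplified value. A None result means the queried AA is still
  // in flux, so we stay optimistic; a null constant means no candidate was
  // found.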
5444 
5445   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5446     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5447       return true;
5448     if (askSimplifiedValueFor<AAPotentialConstantValues>(A))
5449       return true;
5450     return false;
5451   }
5452 
5453   /// See AbstractAttribute::manifest(...).
5454   ChangeStatus manifest(Attributor &A) override {
5455     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5456     for (auto &U : getAssociatedValue().uses()) {
5457       // Check if we need to adjust the insertion point to make sure the IR is
5458       // valid.
5459       Instruction *IP = dyn_cast<Instruction>(U.getUser());
5460       if (auto *PHI = dyn_cast_or_null<PHINode>(IP))
5461         IP = PHI->getIncomingBlock(U)->getTerminator();
5462       if (auto *NewV = manifestReplacementValue(A, IP)) {
5463         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue()
5464                           << " -> " << *NewV << " :: " << *this << "\n");
5465         if (A.changeUseAfterManifest(U, *NewV))
5466           Changed = ChangeStatus::CHANGED;
5467       }
5468     }
5469 
5470     return Changed | AAValueSimplify::manifest(A);
5471   }
5472 
5473   /// See AbstractState::indicatePessimisticFixpoint(...).
5474   ChangeStatus indicatePessimisticFixpoint() override {
5475     SimplifiedAssociatedValue = &getAssociatedValue();
5476     return AAValueSimplify::indicatePessimisticFixpoint();
5477   }
5478 };
5479 
5480 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5481   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5482       : AAValueSimplifyImpl(IRP, A) {}
5483 
5484   void initialize(Attributor &A) override {
5485     AAValueSimplifyImpl::initialize(A);
5486     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5487       indicatePessimisticFixpoint();
5488     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5489                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5490                 /* IgnoreSubsumingPositions */ true))
5491       indicatePessimisticFixpoint();
5492   }
5493 
5494   /// See AbstractAttribute::updateImpl(...).
5495   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
5498     Argument *Arg = getAssociatedArgument();
5499     if (Arg->hasByValAttr()) {
5500       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5501       //       there is no race by not copying a constant byval.
5502       bool IsKnown;
5503       if (!AA::isAssumedReadOnly(A, getIRPosition(), *this, IsKnown))
5504         return indicatePessimisticFixpoint();
5505     }
5506 
5507     auto Before = SimplifiedAssociatedValue;
5508 
5509     auto PredForCallSite = [&](AbstractCallSite ACS) {
5510       const IRPosition &ACSArgPos =
5511           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
5514       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5515         return false;
5516 
      // Simplify the argument operand explicitly and check if the result is
      // valid in the current scope. This avoids referring to simplified values
      // in other functions, e.g., we don't want to say an argument in a
      // static function is actually an argument in a different function.
5521       bool UsedAssumedInformation = false;
5522       Optional<Constant *> SimpleArgOp =
5523           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5524       if (!SimpleArgOp)
5525         return true;
5526       if (!SimpleArgOp.getValue())
5527         return false;
5528       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5529         return false;
5530       return unionAssumed(*SimpleArgOp);
5531     };
5532 
    // Generate an answer specific to a call site context.
5534     bool Success;
5535     bool UsedAssumedInformation = false;
5536     if (hasCallBaseContext() &&
5537         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5538       Success = PredForCallSite(
5539           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5540     else
5541       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5542                                        UsedAssumedInformation);
5543 
5544     if (!Success)
5545       if (!askSimplifiedValueForOtherAAs(A))
5546         return indicatePessimisticFixpoint();
5547 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5551   }
5552 
5553   /// See AbstractAttribute::trackStatistics()
5554   void trackStatistics() const override {
5555     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5556   }
5557 };
5558 
5559 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5560   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5561       : AAValueSimplifyImpl(IRP, A) {}
5562 
5563   /// See AAValueSimplify::getAssumedSimplifiedValue()
5564   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5565     if (!isValidState())
5566       return nullptr;
5567     return SimplifiedAssociatedValue;
5568   }
5569 
5570   /// See AbstractAttribute::updateImpl(...).
5571   ChangeStatus updateImpl(Attributor &A) override {
5572     auto Before = SimplifiedAssociatedValue;
5573 
5574     auto ReturnInstCB = [&](Instruction &I) {
5575       auto &RI = cast<ReturnInst>(I);
5576       return checkAndUpdate(
5577           A, *this,
5578           IRPosition::value(*RI.getReturnValue(), getCallBaseContext()));
5579     };
5580 
5581     bool UsedAssumedInformation = false;
5582     if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
5583                                    UsedAssumedInformation))
5584       if (!askSimplifiedValueForOtherAAs(A))
5585         return indicatePessimisticFixpoint();
5586 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5590   }
5591 
5592   ChangeStatus manifest(Attributor &A) override {
5593     // We queried AAValueSimplify for the returned values so they will be
5594     // replaced if a simplified form was found. Nothing to do here.
5595     return ChangeStatus::UNCHANGED;
5596   }
5597 
5598   /// See AbstractAttribute::trackStatistics()
5599   void trackStatistics() const override {
5600     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5601   }
5602 };
5603 
5604 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5605   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5606       : AAValueSimplifyImpl(IRP, A) {}
5607 
5608   /// See AbstractAttribute::initialize(...).
5609   void initialize(Attributor &A) override {
5610     AAValueSimplifyImpl::initialize(A);
5611     Value &V = getAnchorValue();
5612 
    // TODO: Add other cases here.
5614     if (isa<Constant>(V))
5615       indicatePessimisticFixpoint();
5616   }
5617 
5618   /// See AbstractAttribute::updateImpl(...).
5619   ChangeStatus updateImpl(Attributor &A) override {
5620     auto Before = SimplifiedAssociatedValue;
5621     if (!askSimplifiedValueForOtherAAs(A))
5622       return indicatePessimisticFixpoint();
5623 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5627   }
5628 
5629   /// See AbstractAttribute::trackStatistics()
5630   void trackStatistics() const override {
5631     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5632   }
5633 };
5634 
5635 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5636   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5637       : AAValueSimplifyImpl(IRP, A) {}
5638 
5639   /// See AbstractAttribute::initialize(...).
5640   void initialize(Attributor &A) override {
5641     SimplifiedAssociatedValue = nullptr;
5642     indicateOptimisticFixpoint();
5643   }
  /// See AbstractAttribute::updateImpl(...).
5645   ChangeStatus updateImpl(Attributor &A) override {
5646     llvm_unreachable(
5647         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5648   }
5649   /// See AbstractAttribute::trackStatistics()
5650   void trackStatistics() const override {
5651     STATS_DECLTRACK_FN_ATTR(value_simplify)
5652   }
5653 };
5654 
5655 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5656   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5657       : AAValueSimplifyFunction(IRP, A) {}
5658   /// See AbstractAttribute::trackStatistics()
5659   void trackStatistics() const override {
5660     STATS_DECLTRACK_CS_ATTR(value_simplify)
5661   }
5662 };
5663 
5664 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5665   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5666       : AAValueSimplifyImpl(IRP, A) {}
5667 
5668   void initialize(Attributor &A) override {
5669     AAValueSimplifyImpl::initialize(A);
5670     Function *Fn = getAssociatedFunction();
5671     if (!Fn) {
5672       indicatePessimisticFixpoint();
5673       return;
5674     }
5675     for (Argument &Arg : Fn->args()) {
5676       if (Arg.hasReturnedAttr()) {
5677         auto IRP = IRPosition::callsite_argument(*cast<CallBase>(getCtxI()),
5678                                                  Arg.getArgNo());
5679         if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT &&
5680             checkAndUpdate(A, *this, IRP))
5681           indicateOptimisticFixpoint();
5682         else
5683           indicatePessimisticFixpoint();
5684         return;
5685       }
5686     }
5687   }
5688 
5689   /// See AbstractAttribute::updateImpl(...).
5690   ChangeStatus updateImpl(Attributor &A) override {
5691     auto Before = SimplifiedAssociatedValue;
5692     auto &RetAA = A.getAAFor<AAReturnedValues>(
5693         *this, IRPosition::function(*getAssociatedFunction()),
5694         DepClassTy::REQUIRED);
5695     auto PredForReturned =
5696         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5697           bool UsedAssumedInformation = false;
5698           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5699               &RetVal, *cast<CallBase>(getCtxI()), *this,
5700               UsedAssumedInformation);
5701           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5702               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5703           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5704         };
5705     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5706       if (!askSimplifiedValueForOtherAAs(A))
5707         return indicatePessimisticFixpoint();
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
5710   }
5711 
5712   void trackStatistics() const override {
5713     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5714   }
5715 };
5716 
5717 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5718   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5719       : AAValueSimplifyFloating(IRP, A) {}
5720 
5721   /// See AbstractAttribute::manifest(...).
5722   ChangeStatus manifest(Attributor &A) override {
5723     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5724     // TODO: We should avoid simplification duplication to begin with.
5725     auto *FloatAA = A.lookupAAFor<AAValueSimplify>(
5726         IRPosition::value(getAssociatedValue()), this, DepClassTy::NONE);
5727     if (FloatAA && FloatAA->getState().isValidState())
5728       return Changed;
5729 
5730     if (auto *NewV = manifestReplacementValue(A, getCtxI())) {
5731       Use &U = cast<CallBase>(&getAnchorValue())
5732                    ->getArgOperandUse(getCallSiteArgNo());
5733       if (A.changeUseAfterManifest(U, *NewV))
5734         Changed = ChangeStatus::CHANGED;
5735     }
5736 
5737     return Changed | AAValueSimplify::manifest(A);
5738   }
5739 
5740   void trackStatistics() const override {
5741     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5742   }
5743 };
5744 } // namespace
5745 
5746 /// ----------------------- Heap-To-Stack Conversion ---------------------------
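///
/// As a sketch of the rewrite performed below: a removable allocation of
/// known, small size whose uses are all safe, e.g.,
///
///   %p = call i8* @malloc(i64 8)
///   ...
///   call void @free(i8* %p)
///
/// is turned into a stack allocation, with the free calls deleted:
///
///   %p.h2s = alloca i8, i64 8
///   ...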
5747 namespace {
5748 struct AAHeapToStackFunction final : public AAHeapToStack {
5749 
5750   struct AllocationInfo {
5751     /// The call that allocates the memory.
5752     CallBase *const CB;
5753 
5754     /// The library function id for the allocation.
5755     LibFunc LibraryFunctionId = NotLibFunc;
5756 
5757     /// The status wrt. a rewrite.
5758     enum {
5759       STACK_DUE_TO_USE,
5760       STACK_DUE_TO_FREE,
5761       INVALID,
5762     } Status = STACK_DUE_TO_USE;
5763 
5764     /// Flag to indicate if we encountered a use that might free this allocation
5765     /// but which is not in the deallocation infos.
5766     bool HasPotentiallyFreeingUnknownUses = false;
5767 
5768     /// Flag to indicate that we should place the new alloca in the function
5769     /// entry block rather than where the call site (CB) is.
5770     bool MoveAllocaIntoEntry = true;
5771 
5772     /// The set of free calls that use this allocation.
5773     SmallSetVector<CallBase *, 1> PotentialFreeCalls{};
5774   };
5775 
5776   struct DeallocationInfo {
5777     /// The call that deallocates the memory.
5778     CallBase *const CB;
5779 
5780     /// Flag to indicate if we don't know all objects this deallocation might
5781     /// free.
5782     bool MightFreeUnknownObjects = false;
5783 
5784     /// The set of allocation calls that are potentially freed.
5785     SmallSetVector<CallBase *, 1> PotentialAllocationCalls{};
5786   };
5787 
5788   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5789       : AAHeapToStack(IRP, A) {}
5790 
5791   ~AAHeapToStackFunction() {
5792     // Ensure we call the destructor so we release any memory allocated in the
5793     // sets.
5794     for (auto &It : AllocationInfos)
5795       It.second->~AllocationInfo();
5796     for (auto &It : DeallocationInfos)
5797       It.second->~DeallocationInfo();
5798   }
5799 
5800   void initialize(Attributor &A) override {
5801     AAHeapToStack::initialize(A);
5802 
5803     const Function *F = getAnchorScope();
5804     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5805 
5806     auto AllocationIdentifierCB = [&](Instruction &I) {
5807       CallBase *CB = dyn_cast<CallBase>(&I);
5808       if (!CB)
5809         return true;
5810       if (isFreeCall(CB, TLI)) {
5811         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5812         return true;
5813       }
5814       // To do heap to stack, we need to know that the allocation itself is
5815       // removable once uses are rewritten, and that we can initialize the
5816       // alloca to the same pattern as the original allocation result.
5817       if (isAllocationFn(CB, TLI) && isAllocRemovable(CB, TLI)) {
5818         auto *I8Ty = Type::getInt8Ty(CB->getParent()->getContext());
5819         if (nullptr != getInitialValueOfAllocation(CB, TLI, I8Ty)) {
5820           AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB};
5821           AllocationInfos[CB] = AI;
5822           if (TLI)
5823             TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5824         }
5825       }
5826       return true;
5827     };
5828 
5829     bool UsedAssumedInformation = false;
5830     bool Success = A.checkForAllCallLikeInstructions(
5831         AllocationIdentifierCB, *this, UsedAssumedInformation,
5832         /* CheckBBLivenessOnly */ false,
5833         /* CheckPotentiallyDead */ true);
5834     (void)Success;
5835     assert(Success && "Did not expect the call base visit callback to fail!");
5836 
5837     Attributor::SimplifictionCallbackTy SCB =
5838         [](const IRPosition &, const AbstractAttribute *,
5839            bool &) -> Optional<Value *> { return nullptr; };
5840     for (const auto &It : AllocationInfos)
5841       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5842                                        SCB);
5843     for (const auto &It : DeallocationInfos)
5844       A.registerSimplificationCallback(IRPosition::callsite_returned(*It.first),
5845                                        SCB);
5846   }
5847 
5848   const std::string getAsStr() const override {
5849     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5850     for (const auto &It : AllocationInfos) {
5851       if (It.second->Status == AllocationInfo::INVALID)
5852         ++NumInvalidMallocs;
5853       else
5854         ++NumH2SMallocs;
5855     }
5856     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5857            std::to_string(NumInvalidMallocs);
5858   }
5859 
5860   /// See AbstractAttribute::trackStatistics().
5861   void trackStatistics() const override {
5862     STATS_DECL(
5863         MallocCalls, Function,
5864         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5865     for (auto &It : AllocationInfos)
5866       if (It.second->Status != AllocationInfo::INVALID)
5867         ++BUILD_STAT_NAME(MallocCalls, Function);
5868   }
5869 
5870   bool isAssumedHeapToStack(const CallBase &CB) const override {
5871     if (isValidState())
5872       if (AllocationInfo *AI =
5873               AllocationInfos.lookup(const_cast<CallBase *>(&CB)))
5874         return AI->Status != AllocationInfo::INVALID;
5875     return false;
5876   }
5877 
5878   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5879     if (!isValidState())
5880       return false;
5881 
5882     for (auto &It : AllocationInfos) {
5883       AllocationInfo &AI = *It.second;
5884       if (AI.Status == AllocationInfo::INVALID)
5885         continue;
5886 
5887       if (AI.PotentialFreeCalls.count(&CB))
5888         return true;
5889     }
5890 
5891     return false;
5892   }
5893 
5894   ChangeStatus manifest(Attributor &A) override {
5895     assert(getState().isValidState() &&
5896            "Attempted to manifest an invalid state!");
5897 
5898     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5899     Function *F = getAnchorScope();
5900     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5901 
5902     for (auto &It : AllocationInfos) {
5903       AllocationInfo &AI = *It.second;
5904       if (AI.Status == AllocationInfo::INVALID)
5905         continue;
5906 
5907       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5908         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5909         A.deleteAfterManifest(*FreeCall);
5910         HasChanged = ChangeStatus::CHANGED;
5911       }
5912 
5913       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5914                         << "\n");
5915 
5916       auto Remark = [&](OptimizationRemark OR) {
5917         LibFunc IsAllocShared;
5918         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5919           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5920             return OR << "Moving globalized variable to the stack.";
5921         return OR << "Moving memory allocation from the heap to the stack.";
5922       };
5923       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5924         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5925       else
5926         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5927 
5928       const DataLayout &DL = A.getInfoCache().getDL();
5929       Value *Size;
5930       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5931       if (SizeAPI) {
5932         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5933       } else {
5934         LLVMContext &Ctx = AI.CB->getContext();
5935         ObjectSizeOpts Opts;
5936         ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, Opts);
5937         SizeOffsetEvalType SizeOffsetPair = Eval.compute(AI.CB);
5938         assert(SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown() &&
5939                cast<ConstantInt>(SizeOffsetPair.second)->isZero());
5940         Size = SizeOffsetPair.first;
5941       }
5942 
5943       Instruction *IP =
5944           AI.MoveAllocaIntoEntry ? &F->getEntryBlock().front() : AI.CB;
5945 
5946       Align Alignment(1);
5947       if (MaybeAlign RetAlign = AI.CB->getRetAlign())
5948         Alignment = std::max(Alignment, *RetAlign);
5949       if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
5950         Optional<APInt> AlignmentAPI = getAPInt(A, *this, *Align);
5951         assert(AlignmentAPI && AlignmentAPI.getValue().getZExtValue() > 0 &&
5952                "Expected an alignment during manifest!");
5953         Alignment = std::max(
5954             Alignment, assumeAligned(AlignmentAPI.getValue().getZExtValue()));
5955       }
5956 
5957       // TODO: Hoist the alloca towards the function entry.
5958       unsigned AS = DL.getAllocaAddrSpace();
5959       Instruction *Alloca =
5960           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5961                          AI.CB->getName() + ".h2s", IP);
5962 
5963       if (Alloca->getType() != AI.CB->getType())
5964         Alloca = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
5965             Alloca, AI.CB->getType(), "malloc_cast", AI.CB);
5966 
5967       auto *I8Ty = Type::getInt8Ty(F->getContext());
5968       auto *InitVal = getInitialValueOfAllocation(AI.CB, TLI, I8Ty);
5969       assert(InitVal &&
5970              "Must be able to materialize initial memory state of allocation");
5971 
5972       A.changeAfterManifest(IRPosition::inst(*AI.CB), *Alloca);
5973 
5974       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5975         auto *NBB = II->getNormalDest();
5976         BranchInst::Create(NBB, AI.CB->getParent());
5977         A.deleteAfterManifest(*AI.CB);
5978       } else {
5979         A.deleteAfterManifest(*AI.CB);
5980       }
5981 
      // Initialize the alloca with the same value as used by the allocation
      // function. We can skip undef as the initial value of an alloca is
      // undef, and the memset would simply end up being DSEd.
5985       if (!isa<UndefValue>(InitVal)) {
5986         IRBuilder<> Builder(Alloca->getNextNode());
5987         // TODO: Use alignment above if align!=1
5988         Builder.CreateMemSet(Alloca, InitVal, Size, None);
5989       }
5990       HasChanged = ChangeStatus::CHANGED;
5991     }
5992 
5993     return HasChanged;
5994   }
5995 
5996   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5997                            Value &V) {
5998     bool UsedAssumedInformation = false;
5999     Optional<Constant *> SimpleV =
6000         A.getAssumedConstant(V, AA, UsedAssumedInformation);
6001     if (!SimpleV)
6002       return APInt(64, 0);
6003     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
6004       return CI->getValue();
6005     return llvm::None;
6006   }
6007 
6008   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6009                           AllocationInfo &AI) {
6010     auto Mapper = [&](const Value *V) -> const Value * {
6011       bool UsedAssumedInformation = false;
6012       if (Optional<Constant *> SimpleV =
6013               A.getAssumedConstant(*V, AA, UsedAssumedInformation))
6014         if (*SimpleV)
6015           return *SimpleV;
6016       return V;
6017     };
6018 
6019     const Function *F = getAnchorScope();
6020     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6021     return getAllocSize(AI.CB, TLI, Mapper);
6022   }
6023 
6024   /// Collection of all malloc-like calls in a function with associated
6025   /// information.
6026   MapVector<CallBase *, AllocationInfo *> AllocationInfos;
6027 
6028   /// Collection of all free-like calls in a function with associated
6029   /// information.
6030   MapVector<CallBase *, DeallocationInfo *> DeallocationInfos;
6031 
6032   ChangeStatus updateImpl(Attributor &A) override;
6033 };
6034 
6035 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6036   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6037   const Function *F = getAnchorScope();
6038   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
6039 
6040   const auto &LivenessAA =
6041       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6042 
6043   MustBeExecutedContextExplorer &Explorer =
6044       A.getInfoCache().getMustBeExecutedContextExplorer();
6045 
6046   bool StackIsAccessibleByOtherThreads =
6047       A.getInfoCache().stackIsAccessibleByOtherThreads();
6048 
6049   LoopInfo *LI =
6050       A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(*F);
6051   Optional<bool> MayContainIrreducibleControl;
6052   auto IsInLoop = [&](BasicBlock &BB) {
6053     if (&F->getEntryBlock() == &BB)
6054       return false;
6055     if (!MayContainIrreducibleControl.has_value())
6056       MayContainIrreducibleControl = mayContainIrreducibleControl(*F, LI);
6057     if (MayContainIrreducibleControl.value())
6058       return true;
6059     return LI->getLoopFor(&BB) != nullptr;
6060   };
6061 
6062   // Flag to ensure we update our deallocation information at most once per
6063   // updateImpl call and only if we use the free check reasoning.
6064   bool HasUpdatedFrees = false;
6065 
6066   auto UpdateFrees = [&]() {
6067     HasUpdatedFrees = true;
6068 
6069     for (auto &It : DeallocationInfos) {
6070       DeallocationInfo &DI = *It.second;
      // For now we cannot use deallocations that have unknown inputs; skip
      // them.
6073       if (DI.MightFreeUnknownObjects)
6074         continue;
6075 
6076       // No need to analyze dead calls, ignore them instead.
6077       bool UsedAssumedInformation = false;
6078       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6079                           /* CheckBBLivenessOnly */ true))
6080         continue;
6081 
6082       // Use the non-optimistic version to get the freed object.
6083       Value *Obj = getUnderlyingObject(DI.CB->getArgOperand(0));
6084       if (!Obj) {
6085         LLVM_DEBUG(dbgs() << "[H2S] Unknown underlying object for free!\n");
6086         DI.MightFreeUnknownObjects = true;
6087         continue;
6088       }
6089 
6090       // Free of null and undef can be ignored as no-ops (or UB in the latter
6091       // case).
6092       if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6093         continue;
6094 
6095       CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6096       if (!ObjCB) {
6097         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-call object: " << *Obj
6098                           << "\n");
6099         DI.MightFreeUnknownObjects = true;
6100         continue;
6101       }
6102 
6103       AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6104       if (!AI) {
6105         LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6106                           << "\n");
6107         DI.MightFreeUnknownObjects = true;
6108         continue;
6109       }
6110 
6111       DI.PotentialAllocationCalls.insert(ObjCB);
6112     }
6113   };
6114 
6115   auto FreeCheck = [&](AllocationInfo &AI) {
    // If the stack is not accessible by other threads, the "must-free" logic
    // doesn't apply as the pointer could be shared and needs to be placed in
    // "shareable" memory.
6119     if (!StackIsAccessibleByOtherThreads) {
6120       auto &NoSyncAA =
6121           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6122       if (!NoSyncAA.isAssumedNoSync()) {
6123         LLVM_DEBUG(
6124             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6125                       "other threads and function is not nosync:\n");
6126         return false;
6127       }
6128     }
6129     if (!HasUpdatedFrees)
6130       UpdateFrees();
6131 
6132     // TODO: Allow multi exit functions that have different free calls.
6133     if (AI.PotentialFreeCalls.size() != 1) {
6134       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6135                         << AI.PotentialFreeCalls.size() << "\n");
6136       return false;
6137     }
6138     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6139     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6140     if (!DI) {
6141       LLVM_DEBUG(
6142           dbgs() << "[H2S] unique free call was not known as deallocation call "
6143                  << *UniqueFree << "\n");
6144       return false;
6145     }
6146     if (DI->MightFreeUnknownObjects) {
6147       LLVM_DEBUG(
6148           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6149       return false;
6150     }
6151     if (DI->PotentialAllocationCalls.empty())
6152       return true;
6153     if (DI->PotentialAllocationCalls.size() > 1) {
6154       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6155                         << DI->PotentialAllocationCalls.size()
6156                         << " different allocations\n");
6157       return false;
6158     }
6159     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6160       LLVM_DEBUG(
6161           dbgs()
6162           << "[H2S] unique free call not known to free this allocation but "
6163           << **DI->PotentialAllocationCalls.begin() << "\n");
6164       return false;
6165     }
6166     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6167     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6168       LLVM_DEBUG(
6169           dbgs()
6170           << "[H2S] unique free call might not be executed with the allocation "
6171           << *UniqueFree << "\n");
6172       return false;
6173     }
6174     return true;
6175   };
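  // For example, the free check accepts an allocation whose unique free call
  // is part of the allocation's must-be-executed context:
  //
  //   %p = call i8* @malloc(i64 8)
  //   ...                          ; straight-line code
  //   call void @free(i8* %p)
  //
  // while a free call that is only reached on some paths is rejected.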
6176 
6177   auto UsesCheck = [&](AllocationInfo &AI) {
6178     bool ValidUsesOnly = true;
6179 
6180     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6181       Instruction *UserI = cast<Instruction>(U.getUser());
6182       if (isa<LoadInst>(UserI))
6183         return true;
6184       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6185         if (SI->getValueOperand() == U.get()) {
6186           LLVM_DEBUG(dbgs()
6187                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6188           ValidUsesOnly = false;
6189         } else {
6190           // A store into the malloc'ed memory is fine.
6191         }
6192         return true;
6193       }
6194       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6195         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6196           return true;
6197         if (DeallocationInfos.count(CB)) {
6198           AI.PotentialFreeCalls.insert(CB);
6199           return true;
6200         }
6201 
6202         unsigned ArgNo = CB->getArgOperandNo(&U);
6203 
6204         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6205             *this, IRPosition::callsite_argument(*CB, ArgNo),
6206             DepClassTy::OPTIONAL);
6207 
6208         // If a call site argument use is nofree, we are fine.
6209         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6210             *this, IRPosition::callsite_argument(*CB, ArgNo),
6211             DepClassTy::OPTIONAL);
6212 
6213         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6214         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6215         if (MaybeCaptured ||
6216             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6217              MaybeFreed)) {
6218           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6219 
6220           // Emit a missed remark if this is missed OpenMP globalization.
6221           auto Remark = [&](OptimizationRemarkMissed ORM) {
6222             return ORM
6223                    << "Could not move globalized variable to the stack. "
6224                       "Variable is potentially captured in call. Mark "
6225                       "parameter as `__attribute__((noescape))` to override.";
6226           };
6227 
6228           if (ValidUsesOnly &&
6229               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6230             A.emitRemark<OptimizationRemarkMissed>(CB, "OMP113", Remark);
6231 
6232           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6233           ValidUsesOnly = false;
6234         }
6235         return true;
6236       }
6237 
6238       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6239           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6240         Follow = true;
6241         return true;
6242       }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
6245       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6246       ValidUsesOnly = false;
6247       return true;
6248     };
6249     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6250       return false;
6251     return ValidUsesOnly;
6252   };
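  // For example, the uses check tolerates loads from and stores into the
  // allocation but rejects a store that leaks the pointer itself:
  //
  //   store i8 0, i8* %p       ; fine, a store into the allocation
  //   store i8* %p, i8** @g    ; bad, %p escapes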
6253 
6254   // The actual update starts here. We look at all allocations and depending on
6255   // their status perform the appropriate check(s).
6256   for (auto &It : AllocationInfos) {
6257     AllocationInfo &AI = *It.second;
6258     if (AI.Status == AllocationInfo::INVALID)
6259       continue;
6260 
6261     if (Value *Align = getAllocAlignment(AI.CB, TLI)) {
6262       Optional<APInt> APAlign = getAPInt(A, *this, *Align);
6263       if (!APAlign) {
6264         // Can't generate an alloca which respects the required alignment
6265         // on the allocation.
6266         LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6267                           << "\n");
6268         AI.Status = AllocationInfo::INVALID;
6269         Changed = ChangeStatus::CHANGED;
6270         continue;
6271       }
6272       if (APAlign->ugt(llvm::Value::MaximumAlignment) ||
6273           !APAlign->isPowerOf2()) {
6274         LLVM_DEBUG(dbgs() << "[H2S] Invalid allocation alignment: " << APAlign
6275                           << "\n");
6276         AI.Status = AllocationInfo::INVALID;
6277         Changed = ChangeStatus::CHANGED;
6278         continue;
6279       }
6280     }
6281 
6282     Optional<APInt> Size = getSize(A, *this, AI);
6283     if (MaxHeapToStackSize != -1) {
6284       if (!Size || Size.getValue().ugt(MaxHeapToStackSize)) {
6285         LLVM_DEBUG({
6286           if (!Size)
6287             dbgs() << "[H2S] Unknown allocation size: " << *AI.CB << "\n";
6288           else
6289             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6290                    << MaxHeapToStackSize << "\n";
6291         });
6292 
6293         AI.Status = AllocationInfo::INVALID;
6294         Changed = ChangeStatus::CHANGED;
6295         continue;
6296       }
6297     }
6298 
6299     switch (AI.Status) {
6300     case AllocationInfo::STACK_DUE_TO_USE:
6301       if (UsesCheck(AI))
6302         break;
6303       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6304       LLVM_FALLTHROUGH;
6305     case AllocationInfo::STACK_DUE_TO_FREE:
6306       if (FreeCheck(AI))
6307         break;
6308       AI.Status = AllocationInfo::INVALID;
6309       Changed = ChangeStatus::CHANGED;
6310       break;
6311     case AllocationInfo::INVALID:
6312       llvm_unreachable("Invalid allocations should never reach this point!");
    }
6314 
6315     // Check if we still think we can move it into the entry block.
6316     if (AI.MoveAllocaIntoEntry &&
6317         (!Size.has_value() || IsInLoop(*AI.CB->getParent())))
6318       AI.MoveAllocaIntoEntry = false;
6319   }
6320 
6321   return Changed;
6322 }
6323 } // namespace
6324 
6325 /// ----------------------- Privatizable Pointers ------------------------------
6326 namespace {
6327 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6328   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6329       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6330 
6331   ChangeStatus indicatePessimisticFixpoint() override {
6332     AAPrivatizablePtr::indicatePessimisticFixpoint();
6333     PrivatizableType = nullptr;
6334     return ChangeStatus::CHANGED;
6335   }
6336 
  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet; nullptr means there is none.
6339   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6340 
6341   /// Return a privatizable type that encloses both T0 and T1.
6342   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6343   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6344     if (!T0)
6345       return T1;
6346     if (!T1)
6347       return T0;
6348     if (T0 == T1)
6349       return T0;
6350     return nullptr;
6351   }
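  // For example, combineTypes(None, i32) yields i32, combineTypes(i32, i32)
  // yields i32, and combineTypes(i32, i64) yields nullptr since, per the TODO
  // above, no mapping between distinct types is maintained yet.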
6352 
6353   Optional<Type *> getPrivatizableType() const override {
6354     return PrivatizableType;
6355   }
6356 
6357   const std::string getAsStr() const override {
6358     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6359   }
6360 
6361 protected:
6362   Optional<Type *> PrivatizableType;
6363 };
6364 
6365 // TODO: Do this for call site arguments (probably also other values) as well.
6366 
6367 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6368   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6369       : AAPrivatizablePtrImpl(IRP, A) {}
6370 
6371   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6372   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6373     // If this is a byval argument and we know all the call sites (so we can
6374     // rewrite them), there is no need to check them explicitly.
6375     bool UsedAssumedInformation = false;
6376     SmallVector<Attribute, 1> Attrs;
6377     getAttrs({Attribute::ByVal}, Attrs, /* IgnoreSubsumingPositions */ true);
6378     if (!Attrs.empty() &&
6379         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6380                                true, UsedAssumedInformation))
6381       return Attrs[0].getValueAsType();
6382 
6383     Optional<Type *> Ty;
6384     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6385 
    // Make sure the associated call site argument has the same type at all
    // call sites and that it is an allocation we know is safe to privatize;
    // for now that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow-up patch.
6392     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6393       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
6396       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6397         return false;
6398 
6399       // Check that all call sites agree on a type.
6400       auto &PrivCSArgAA =
6401           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6402       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6403 
6404       LLVM_DEBUG({
6405         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6406         if (CSTy && CSTy.getValue())
6407           CSTy.getValue()->print(dbgs());
6408         else if (CSTy)
6409           dbgs() << "<nullptr>";
6410         else
6411           dbgs() << "<none>";
6412       });
6413 
6414       Ty = combineTypes(Ty, CSTy);
6415 
6416       LLVM_DEBUG({
6417         dbgs() << " : New Type: ";
6418         if (Ty && Ty.getValue())
6419           Ty.getValue()->print(dbgs());
6420         else if (Ty)
6421           dbgs() << "<nullptr>";
6422         else
6423           dbgs() << "<none>";
6424         dbgs() << "\n";
6425       });
6426 
6427       return !Ty || Ty.getValue();
6428     };
6429 
6430     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6431                                 UsedAssumedInformation))
6432       return nullptr;
6433     return Ty;
6434   }
6435 
6436   /// See AbstractAttribute::updateImpl(...).
6437   ChangeStatus updateImpl(Attributor &A) override {
6438     PrivatizableType = identifyPrivatizableType(A);
6439     if (!PrivatizableType)
6440       return ChangeStatus::UNCHANGED;
6441     if (!PrivatizableType.getValue())
6442       return indicatePessimisticFixpoint();
6443 
    // The dependence is optional; we do not want to give up on this attribute
    // just because we give up on the alignment.
6446     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6447                         DepClassTy::OPTIONAL);
6448 
6449     // Avoid arguments with padding for now.
6450     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6451         !isDenselyPacked(*PrivatizableType, A.getInfoCache().getDL())) {
6452       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6453       return indicatePessimisticFixpoint();
6454     }
6455 
6456     // Collect the types that will replace the privatizable type in the function
6457     // signature.
6458     SmallVector<Type *, 16> ReplacementTypes;
6459     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6460 
6461     // Verify callee and caller agree on how the promoted argument would be
6462     // passed.
6463     Function &Fn = *getIRPosition().getAnchorScope();
6464     const auto *TTI =
6465         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6466     if (!TTI) {
6467       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Missing TTI for function "
6468                         << Fn.getName() << "\n");
6469       return indicatePessimisticFixpoint();
6470     }
6471 
6472     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6473       CallBase *CB = ACS.getInstruction();
6474       return TTI->areTypesABICompatible(
6475           CB->getCaller(), CB->getCalledFunction(), ReplacementTypes);
6476     };
6477     bool UsedAssumedInformation = false;
6478     if (!A.checkForAllCallSites(CallSiteCheck, *this, true,
6479                                 UsedAssumedInformation)) {
6480       LLVM_DEBUG(
6481           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6482                  << Fn.getName() << "\n");
6483       return indicatePessimisticFixpoint();
6484     }
6485 
6486     // Register a rewrite of the argument.
6487     Argument *Arg = getAssociatedArgument();
6488     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6489       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6490       return indicatePessimisticFixpoint();
6491     }
6492 
6493     unsigned ArgNo = Arg->getArgNo();
6494 
    // Helper to check if, for the given call site, the associated argument is
    // passed to a callback where the privatization would be different.
6497     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6498       SmallVector<const Use *, 4> CallbackUses;
6499       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6500       for (const Use *U : CallbackUses) {
6501         AbstractCallSite CBACS(U);
6502         assert(CBACS && CBACS.isCallbackCall());
6503         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6504           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6505 
6506           LLVM_DEBUG({
6507             dbgs()
6508                 << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its parent ("
6510                 << Arg->getParent()->getName()
6511                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6512                    "callback ("
6513                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6514                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6515                 << CBACS.getCallArgOperand(CBArg) << " vs "
6516                 << CB.getArgOperand(ArgNo) << "\n"
6517                 << "[AAPrivatizablePtr] " << CBArg << " : "
6518                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6519           });
6520 
6521           if (CBArgNo != int(ArgNo))
6522             continue;
6523           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6524               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6525           if (CBArgPrivAA.isValidState()) {
6526             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6527             if (!CBArgPrivTy)
6528               continue;
6529             if (CBArgPrivTy.getValue() == PrivatizableType)
6530               continue;
6531           }
6532 
6533           LLVM_DEBUG({
6534             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6535                    << " cannot be privatized in the context of its parent ("
6536                    << Arg->getParent()->getName()
6537                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6538                       "callback ("
6539                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6540                    << ").\n[AAPrivatizablePtr] for which the argument "
6541                       "privatization is not compatible.\n";
6542           });
6543           return false;
6544         }
6545       }
6546       return true;
6547     };
6548 
    // Helper to check if, for the given call site, the associated argument is
    // passed to a direct call where the privatization would be different.
6551     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6552       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6553       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6554       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6555              "Expected a direct call operand for callback call operand");
6556 
6557       LLVM_DEBUG({
6558         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its parent ("
6560                << Arg->getParent()->getName()
6561                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6562                   "direct call of ("
6563                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6564                << ").\n";
6565       });
6566 
6567       Function *DCCallee = DC->getCalledFunction();
6568       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6569         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6570             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6571             DepClassTy::REQUIRED);
6572         if (DCArgPrivAA.isValidState()) {
6573           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6574           if (!DCArgPrivTy)
6575             return true;
6576           if (DCArgPrivTy.getValue() == PrivatizableType)
6577             return true;
6578         }
6579       }
6580 
6581       LLVM_DEBUG({
6582         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6583                << " cannot be privatized in the context of its parent ("
6584                << Arg->getParent()->getName()
6585                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6586                   "direct call of ("
6587                << ACS.getInstruction()->getCalledFunction()->getName()
6588                << ").\n[AAPrivatizablePtr] for which the argument "
6589                   "privatization is not compatible.\n";
6590       });
6591       return false;
6592     };
6593 
6594     // Helper to check if the associated argument is used at the given abstract
6595     // call site in a way that is incompatible with the privatization assumed
6596     // here.
6597     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6598       if (ACS.isDirectCall())
6599         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6600       if (ACS.isCallbackCall())
6601         return IsCompatiblePrivArgOfDirectCS(ACS);
6602       return false;
6603     };
6604 
6605     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6606                                 UsedAssumedInformation))
6607       return indicatePessimisticFixpoint();
6608 
6609     return ChangeStatus::UNCHANGED;
6610   }
6611 
  /// Given a type to privatize, \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
6614   static void
6615   identifyReplacementTypes(Type *PrivType,
6616                            SmallVectorImpl<Type *> &ReplacementTypes) {
6617     // TODO: For now we expand the privatization type to the fullest which can
6618     //       lead to dead arguments that need to be removed later.
6619     assert(PrivType && "Expected privatizable type!");
6620 
    // Traverse the type, extract constituent types on the outermost level.
6622     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6623       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6624         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6625     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6626       ReplacementTypes.append(PrivArrayType->getNumElements(),
6627                               PrivArrayType->getElementType());
6628     } else {
6629       ReplacementTypes.push_back(PrivType);
6630     }
6631   }
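  // For example, a privatizable `{ i32, i64 }` struct expands to the
  // replacement types i32 and i64, an `[4 x i32]` array expands to four i32
  // entries, and any other type is kept as the single replacement type.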
6632 
6633   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6634   /// The values needed are taken from the arguments of \p F starting at
6635   /// position \p ArgNo.
6636   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6637                                    unsigned ArgNo, Instruction &IP) {
6638     assert(PrivType && "Expected privatizable type!");
6639 
6640     IRBuilder<NoFolder> IRB(&IP);
6641     const DataLayout &DL = F.getParent()->getDataLayout();
6642 
6643     // Traverse the type, build GEPs and stores.
6644     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6645       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6646       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6647         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6648         Value *Ptr =
6649             constructPointer(PointeeTy, PrivType, &Base,
6650                              PrivStructLayout->getElementOffset(u), IRB, DL);
6651         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6652       }
6653     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6654       Type *PointeeTy = PrivArrayType->getElementType();
6655       Type *PointeePtrTy = PointeeTy->getPointerTo();
6656       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6657       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6658         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6659                                       u * PointeeTySize, IRB, DL);
6660         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6661       }
6662     } else {
6663       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6664     }
6665   }
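  // As a rough sketch, for a `{ i32, i64 }` private copy \p Base initialized
  // from arguments %argN and %argN+1 this emits GEP-based pointers to each
  // field (offsets taken from the struct layout) and stores through them:
  //
  //   store i32 %argN,   i32* <&Base + 0>
  //   store i64 %argN+1, i64* <&Base + 8>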
6666 
6667   /// Extract values from \p Base according to the type \p PrivType at the
6668   /// call position \p ACS. The values are appended to \p ReplacementValues.
6669   void createReplacementValues(Align Alignment, Type *PrivType,
6670                                AbstractCallSite ACS, Value *Base,
6671                                SmallVectorImpl<Value *> &ReplacementValues) {
6672     assert(Base && "Expected base value!");
6673     assert(PrivType && "Expected privatizable type!");
6674     Instruction *IP = ACS.getInstruction();
6675 
6676     IRBuilder<NoFolder> IRB(IP);
6677     const DataLayout &DL = IP->getModule()->getDataLayout();
6678 
6679     Type *PrivPtrType = PrivType->getPointerTo();
6680     if (Base->getType() != PrivPtrType)
6681       Base = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6682           Base, PrivPtrType, "", ACS.getInstruction());
6683 
6684     // Traverse the type, build GEPs and loads.
6685     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6686       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6687       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6688         Type *PointeeTy = PrivStructType->getElementType(u);
6689         Value *Ptr =
6690             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6691                              PrivStructLayout->getElementOffset(u), IRB, DL);
6692         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6693         L->setAlignment(Alignment);
6694         ReplacementValues.push_back(L);
6695       }
6696     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6697       Type *PointeeTy = PrivArrayType->getElementType();
6698       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6699       Type *PointeePtrTy = PointeeTy->getPointerTo();
6700       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6701         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6702                                       u * PointeeTySize, IRB, DL);
6703         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6704         L->setAlignment(Alignment);
6705         ReplacementValues.push_back(L);
6706       }
6707     } else {
6708       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6709       L->setAlignment(Alignment);
6710       ReplacementValues.push_back(L);
6711     }
6712   }
6713 
6714   /// See AbstractAttribute::manifest(...)
6715   ChangeStatus manifest(Attributor &A) override {
6716     if (!PrivatizableType)
6717       return ChangeStatus::UNCHANGED;
6718     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6719 
    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail calls.
    // TODO: Be smarter about new allocas escaping into tail calls.
6723     SmallVector<CallInst *, 16> TailCalls;
6724     bool UsedAssumedInformation = false;
6725     if (!A.checkForAllInstructions(
6726             [&](Instruction &I) {
6727               CallInst &CI = cast<CallInst>(I);
6728               if (CI.isTailCall())
6729                 TailCalls.push_back(&CI);
6730               return true;
6731             },
6732             *this, {Instruction::Call}, UsedAssumedInformation))
6733       return ChangeStatus::UNCHANGED;
6734 
6735     Argument *Arg = getAssociatedArgument();
6736     // Query AAAlign attribute for alignment of associated argument to
6737     // determine the best alignment of loads.
6738     const auto &AlignAA =
6739         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6740 
6741     // Callback to repair the associated function. A new alloca is placed at the
6742     // beginning and initialized with the values passed through arguments. The
6743     // new alloca replaces the use of the old pointer argument.
6744     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6745         [=](const Attributor::ArgumentReplacementInfo &ARI,
6746             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6747           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6748           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6749           const DataLayout &DL = IP->getModule()->getDataLayout();
6750           unsigned AS = DL.getAllocaAddrSpace();
6751           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), AS,
6752                                            Arg->getName() + ".priv", IP);
6753           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6754                                ArgIt->getArgNo(), *IP);
6755 
6756           if (AI->getType() != Arg->getType())
6757             AI = BitCastInst::CreatePointerBitCastOrAddrSpaceCast(
6758                 AI, Arg->getType(), "", IP);
6759           Arg->replaceAllUsesWith(AI);
6760 
6761           for (CallInst *CI : TailCalls)
6762             CI->setTailCall(false);
6763         };
6764 
6765     // Callback to repair a call site of the associated function. The elements
6766     // of the privatizable type are loaded prior to the call and passed to the
6767     // new function version.
6768     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6769         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6770                       AbstractCallSite ACS,
6771                       SmallVectorImpl<Value *> &NewArgOperands) {
6772           // When no alignment is specified for the load instruction,
6773           // natural alignment is assumed.
6774           createReplacementValues(
6775               AlignAA.getAssumedAlign(), *PrivatizableType, ACS,
6776               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6777               NewArgOperands);
6778         };
6779 
6780     // Collect the types that will replace the privatizable type in the function
6781     // signature.
6782     SmallVector<Type *, 16> ReplacementTypes;
6783     identifyReplacementTypes(*PrivatizableType, ReplacementTypes);
6784 
6785     // Register a rewrite of the argument.
6786     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6787                                            std::move(FnRepairCB),
6788                                            std::move(ACSRepairCB)))
6789       return ChangeStatus::CHANGED;
6790     return ChangeStatus::UNCHANGED;
6791   }
6792 
6793   /// See AbstractAttribute::trackStatistics()
6794   void trackStatistics() const override {
6795     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6796   }
6797 };
6798 
6799 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6800   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6801       : AAPrivatizablePtrImpl(IRP, A) {}
6802 
6803   /// See AbstractAttribute::initialize(...).
6804   virtual void initialize(Attributor &A) override {
6805     // TODO: We can privatize more than arguments.
6806     indicatePessimisticFixpoint();
6807   }
6808 
6809   ChangeStatus updateImpl(Attributor &A) override {
6810     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6811                      "updateImpl will not be called");
6812   }
6813 
6814   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6815   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6816     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6817     if (!Obj) {
6818       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6819       return nullptr;
6820     }
6821 
6822     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6823       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6824         if (CI->isOne())
6825           return AI->getAllocatedType();
6826     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6827       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6828           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6829       if (PrivArgAA.isAssumedPrivatizablePtr())
6830         return PrivArgAA.getPrivatizableType();
6831     }
6832 
6833     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6834                          "alloca nor privatizable argument: "
6835                       << *Obj << "!\n");
6836     return nullptr;
6837   }
6838 
6839   /// See AbstractAttribute::trackStatistics()
6840   void trackStatistics() const override {
6841     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6842   }
6843 };
6844 
6845 struct AAPrivatizablePtrCallSiteArgument final
6846     : public AAPrivatizablePtrFloating {
6847   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6848       : AAPrivatizablePtrFloating(IRP, A) {}
6849 
6850   /// See AbstractAttribute::initialize(...).
6851   void initialize(Attributor &A) override {
6852     if (getIRPosition().hasAttr(Attribute::ByVal))
6853       indicateOptimisticFixpoint();
6854   }
6855 
6856   /// See AbstractAttribute::updateImpl(...).
6857   ChangeStatus updateImpl(Attributor &A) override {
6858     PrivatizableType = identifyPrivatizableType(A);
6859     if (!PrivatizableType)
6860       return ChangeStatus::UNCHANGED;
6861     if (!PrivatizableType.getValue())
6862       return indicatePessimisticFixpoint();
6863 
6864     const IRPosition &IRP = getIRPosition();
6865     auto &NoCaptureAA =
6866         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6867     if (!NoCaptureAA.isAssumedNoCapture()) {
6868       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6869       return indicatePessimisticFixpoint();
6870     }
6871 
6872     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6873     if (!NoAliasAA.isAssumedNoAlias()) {
6874       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6875       return indicatePessimisticFixpoint();
6876     }
6877 
6878     bool IsKnown;
6879     if (!AA::isAssumedReadOnly(A, IRP, *this, IsKnown)) {
6880       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6881       return indicatePessimisticFixpoint();
6882     }
6883 
6884     return ChangeStatus::UNCHANGED;
6885   }
6886 
6887   /// See AbstractAttribute::trackStatistics()
6888   void trackStatistics() const override {
6889     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6890   }
6891 };
6892 
6893 struct AAPrivatizablePtrCallSiteReturned final
6894     : public AAPrivatizablePtrFloating {
6895   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6896       : AAPrivatizablePtrFloating(IRP, A) {}
6897 
6898   /// See AbstractAttribute::initialize(...).
6899   void initialize(Attributor &A) override {
6900     // TODO: We can privatize more than arguments.
6901     indicatePessimisticFixpoint();
6902   }
6903 
6904   /// See AbstractAttribute::trackStatistics()
6905   void trackStatistics() const override {
6906     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6907   }
6908 };
6909 
6910 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6911   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6912       : AAPrivatizablePtrFloating(IRP, A) {}
6913 
6914   /// See AbstractAttribute::initialize(...).
6915   void initialize(Attributor &A) override {
6916     // TODO: We can privatize more than arguments.
6917     indicatePessimisticFixpoint();
6918   }
6919 
6920   /// See AbstractAttribute::trackStatistics()
6921   void trackStatistics() const override {
6922     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6923   }
6924 };
6925 } // namespace
6926 
6927 /// -------------------- Memory Behavior Attributes ----------------------------
6928 /// Includes read-none, read-only, and write-only.
6929 /// ----------------------------------------------------------------------------
6930 namespace {
6931 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6932   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6933       : AAMemoryBehavior(IRP, A) {}
6934 
6935   /// See AbstractAttribute::initialize(...).
6936   void initialize(Attributor &A) override {
6937     intersectAssumedBits(BEST_STATE);
6938     getKnownStateFromValue(getIRPosition(), getState());
6939     AAMemoryBehavior::initialize(A);
6940   }
6941 
6942   /// Return the memory behavior information encoded in the IR for \p IRP.
6943   static void getKnownStateFromValue(const IRPosition &IRP,
6944                                      BitIntegerState &State,
6945                                      bool IgnoreSubsumingPositions = false) {
6946     SmallVector<Attribute, 2> Attrs;
6947     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6948     for (const Attribute &Attr : Attrs) {
6949       switch (Attr.getKindAsEnum()) {
6950       case Attribute::ReadNone:
6951         State.addKnownBits(NO_ACCESSES);
6952         break;
6953       case Attribute::ReadOnly:
6954         State.addKnownBits(NO_WRITES);
6955         break;
6956       case Attribute::WriteOnly:
6957         State.addKnownBits(NO_READS);
6958         break;
6959       default:
6960         llvm_unreachable("Unexpected attribute!");
6961       }
6962     }
6963 
6964     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6965       if (!I->mayReadFromMemory())
6966         State.addKnownBits(NO_READS);
6967       if (!I->mayWriteToMemory())
6968         State.addKnownBits(NO_WRITES);
6969     }
6970   }
6971 
6972   /// See AbstractAttribute::getDeducedAttributes(...).
6973   void getDeducedAttributes(LLVMContext &Ctx,
6974                             SmallVectorImpl<Attribute> &Attrs) const override {
6975     assert(Attrs.size() == 0);
6976     if (isAssumedReadNone())
6977       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6978     else if (isAssumedReadOnly())
6979       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6980     else if (isAssumedWriteOnly())
6981       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6982     assert(Attrs.size() <= 1);
6983   }
6984 
6985   /// See AbstractAttribute::manifest(...).
6986   ChangeStatus manifest(Attributor &A) override {
6987     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6988       return ChangeStatus::UNCHANGED;
6989 
6990     const IRPosition &IRP = getIRPosition();
6991 
6992     // Check if we would improve the existing attributes first.
6993     SmallVector<Attribute, 4> DeducedAttrs;
6994     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6995     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6996           return IRP.hasAttr(Attr.getKindAsEnum(),
6997                              /* IgnoreSubsumingPositions */ true);
6998         }))
6999       return ChangeStatus::UNCHANGED;
7000 
7001     // Clear existing attributes.
7002     IRP.removeAttrs(AttrKinds);
7003 
7004     // Use the generic manifest method.
7005     return IRAttribute::manifest(A);
7006   }
7007 
7008   /// See AbstractState::getAsStr().
7009   const std::string getAsStr() const override {
7010     if (isAssumedReadNone())
7011       return "readnone";
7012     if (isAssumedReadOnly())
7013       return "readonly";
7014     if (isAssumedWriteOnly())
7015       return "writeonly";
7016     return "may-read/write";
7017   }
7018 
7019   /// The set of IR attributes AAMemoryBehavior deals with.
7020   static const Attribute::AttrKind AttrKinds[3];
7021 };
7022 
7023 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
7024     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
7025 
7026 /// Memory behavior attribute for a floating value.
7027 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
7028   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
7029       : AAMemoryBehaviorImpl(IRP, A) {}
7030 
7031   /// See AbstractAttribute::updateImpl(...).
7032   ChangeStatus updateImpl(Attributor &A) override;
7033 
7034   /// See AbstractAttribute::trackStatistics()
7035   void trackStatistics() const override {
7036     if (isAssumedReadNone())
7037       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7038     else if (isAssumedReadOnly())
7039       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7040     else if (isAssumedWriteOnly())
7041       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7042   }
7043 
7044 private:
7045   /// Return true if users of \p UserI might access the underlying
7046   /// variable/location described by \p U and should therefore be analyzed.
7047   bool followUsersOfUseIn(Attributor &A, const Use &U,
7048                           const Instruction *UserI);
7049 
7050   /// Update the state according to the effect of use \p U in \p UserI.
7051   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7052 };
7053 
7054 /// Memory behavior attribute for function argument.
7055 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7056   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7057       : AAMemoryBehaviorFloating(IRP, A) {}
7058 
7059   /// See AbstractAttribute::initialize(...).
7060   void initialize(Attributor &A) override {
7061     intersectAssumedBits(BEST_STATE);
7062     const IRPosition &IRP = getIRPosition();
7063     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7064     // can query it when we use has/getAttr. That would allow us to reuse the
7065     // initialize of the base class here.
7066     bool HasByVal =
7067         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7068     getKnownStateFromValue(IRP, getState(),
7069                            /* IgnoreSubsumingPositions */ HasByVal);
7070 
7071     // Initialize the use vector with all direct uses of the associated value.
7072     Argument *Arg = getAssociatedArgument();
7073     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7074       indicatePessimisticFixpoint();
7075   }
7076 
7077   ChangeStatus manifest(Attributor &A) override {
7078     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7079     if (!getAssociatedValue().getType()->isPointerTy())
7080       return ChangeStatus::UNCHANGED;
7081 
7082     // TODO: From readattrs.ll: "inalloca parameters are always
7083     //                           considered written"
7084     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7085       removeKnownBits(NO_WRITES);
7086       removeAssumedBits(NO_WRITES);
7087     }
7088     return AAMemoryBehaviorFloating::manifest(A);
7089   }
7090 
7091   /// See AbstractAttribute::trackStatistics()
7092   void trackStatistics() const override {
7093     if (isAssumedReadNone())
7094       STATS_DECLTRACK_ARG_ATTR(readnone)
7095     else if (isAssumedReadOnly())
7096       STATS_DECLTRACK_ARG_ATTR(readonly)
7097     else if (isAssumedWriteOnly())
7098       STATS_DECLTRACK_ARG_ATTR(writeonly)
7099   }
7100 };
7101 
7102 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7103   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7104       : AAMemoryBehaviorArgument(IRP, A) {}
7105 
7106   /// See AbstractAttribute::initialize(...).
7107   void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, nothing to do here.
7110     Argument *Arg = getAssociatedArgument();
7111     if (!Arg) {
7112       indicatePessimisticFixpoint();
7113       return;
7114     }
7115     if (Arg->hasByValAttr()) {
7116       addKnownBits(NO_WRITES);
7117       removeKnownBits(NO_READS);
7118       removeAssumedBits(NO_READS);
7119     }
7120     AAMemoryBehaviorArgument::initialize(A);
7121     if (getAssociatedFunction()->isDeclaration())
7122       indicatePessimisticFixpoint();
7123   }
7124 
7125   /// See AbstractAttribute::updateImpl(...).
7126   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
7131     Argument *Arg = getAssociatedArgument();
7132     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7133     auto &ArgAA =
7134         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7135     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7136   }
7137 
7138   /// See AbstractAttribute::trackStatistics()
7139   void trackStatistics() const override {
7140     if (isAssumedReadNone())
7141       STATS_DECLTRACK_CSARG_ATTR(readnone)
7142     else if (isAssumedReadOnly())
7143       STATS_DECLTRACK_CSARG_ATTR(readonly)
7144     else if (isAssumedWriteOnly())
7145       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7146   }
7147 };
7148 
7149 /// Memory behavior attribute for a call site return position.
7150 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7151   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7152       : AAMemoryBehaviorFloating(IRP, A) {}
7153 
7154   /// See AbstractAttribute::initialize(...).
7155   void initialize(Attributor &A) override {
7156     AAMemoryBehaviorImpl::initialize(A);
7157     Function *F = getAssociatedFunction();
7158     if (!F || F->isDeclaration())
7159       indicatePessimisticFixpoint();
7160   }
7161 
7162   /// See AbstractAttribute::manifest(...).
7163   ChangeStatus manifest(Attributor &A) override {
7164     // We do not annotate returned values.
7165     return ChangeStatus::UNCHANGED;
7166   }
7167 
7168   /// See AbstractAttribute::trackStatistics()
7169   void trackStatistics() const override {}
7170 };
7171 
7172 /// An AA to represent the memory behavior function attributes.
7173 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7174   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7175       : AAMemoryBehaviorImpl(IRP, A) {}
7176 
7177   /// See AbstractAttribute::updateImpl(Attributor &A).
7178   virtual ChangeStatus updateImpl(Attributor &A) override;
7179 
7180   /// See AbstractAttribute::manifest(...).
7181   ChangeStatus manifest(Attributor &A) override {
7182     Function &F = cast<Function>(getAnchorValue());
7183     if (isAssumedReadNone()) {
7184       F.removeFnAttr(Attribute::ArgMemOnly);
7185       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7186       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7187     }
7188     return AAMemoryBehaviorImpl::manifest(A);
7189   }
7190 
7191   /// See AbstractAttribute::trackStatistics()
7192   void trackStatistics() const override {
7193     if (isAssumedReadNone())
7194       STATS_DECLTRACK_FN_ATTR(readnone)
7195     else if (isAssumedReadOnly())
7196       STATS_DECLTRACK_FN_ATTR(readonly)
7197     else if (isAssumedWriteOnly())
7198       STATS_DECLTRACK_FN_ATTR(writeonly)
7199   }
7200 };
7201 
7202 /// AAMemoryBehavior attribute for call sites.
7203 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7204   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7205       : AAMemoryBehaviorImpl(IRP, A) {}
7206 
7207   /// See AbstractAttribute::initialize(...).
7208   void initialize(Attributor &A) override {
7209     AAMemoryBehaviorImpl::initialize(A);
7210     Function *F = getAssociatedFunction();
7211     if (!F || F->isDeclaration())
7212       indicatePessimisticFixpoint();
7213   }
7214 
7215   /// See AbstractAttribute::updateImpl(...).
7216   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call sites instead of redirecting
    //       requests to the callee.
7221     Function *F = getAssociatedFunction();
7222     const IRPosition &FnPos = IRPosition::function(*F);
7223     auto &FnAA =
7224         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7225     return clampStateAndIndicateChange(getState(), FnAA.getState());
7226   }
7227 
7228   /// See AbstractAttribute::trackStatistics()
7229   void trackStatistics() const override {
7230     if (isAssumedReadNone())
7231       STATS_DECLTRACK_CS_ATTR(readnone)
7232     else if (isAssumedReadOnly())
7233       STATS_DECLTRACK_CS_ATTR(readonly)
7234     else if (isAssumedWriteOnly())
7235       STATS_DECLTRACK_CS_ATTR(writeonly)
7236   }
7237 };
7238 
7239 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7240 
7241   // The current assumed state used to determine a change.
7242   auto AssumedState = getAssumed();
7243 
7244   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
7248     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7249       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7250           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7251       intersectAssumedBits(MemBehaviorAA.getAssumed());
7252       return !isAtFixpoint();
7253     }
7254 
7255     // Remove access kind modifiers if necessary.
7256     if (I.mayReadFromMemory())
7257       removeAssumedBits(NO_READS);
7258     if (I.mayWriteToMemory())
7259       removeAssumedBits(NO_WRITES);
7260     return !isAtFixpoint();
7261   };
7262 
7263   bool UsedAssumedInformation = false;
7264   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7265                                           UsedAssumedInformation))
7266     return indicatePessimisticFixpoint();
7267 
7268   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7269                                         : ChangeStatus::UNCHANGED;
7270 }
7271 
7272 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7273 
7274   const IRPosition &IRP = getIRPosition();
7275   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7276   AAMemoryBehavior::StateType &S = getState();
7277 
  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
7281   Argument *Arg = IRP.getAssociatedArgument();
7282   AAMemoryBehavior::base_t FnMemAssumedState =
7283       AAMemoryBehavior::StateType::getWorstState();
7284   if (!Arg || !Arg->hasByValAttr()) {
7285     const auto &FnMemAA =
7286         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7287     FnMemAssumedState = FnMemAA.getAssumed();
7288     S.addKnownBits(FnMemAA.getKnown());
7289     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7290       return ChangeStatus::UNCHANGED;
7291   }
7292 
7293   // The current assumed state used to determine a change.
7294   auto AssumedState = S.getAssumed();
7295 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
7300   const auto &ArgNoCaptureAA =
7301       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7302   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7303     S.intersectAssumedBits(FnMemAssumedState);
7304     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7305                                           : ChangeStatus::UNCHANGED;
7306   }
7307 
7308   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7309   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7310     Instruction *UserI = cast<Instruction>(U.getUser());
7311     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7312                       << " \n");
7313 
    // Droppable users, e.g., llvm.assume, do not actually perform any action.
7315     if (UserI->isDroppable())
7316       return true;
7317 
7318     // Check if the users of UserI should also be visited.
7319     Follow = followUsersOfUseIn(A, U, UserI);
7320 
7321     // If UserI might touch memory we analyze the use in detail.
7322     if (UserI->mayReadOrWriteMemory())
7323       analyzeUseIn(A, U, UserI);
7324 
7325     return !isAtFixpoint();
7326   };
7327 
7328   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7329     return indicatePessimisticFixpoint();
7330 
7331   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7332                                         : ChangeStatus::UNCHANGED;
7333 }
7334 
7335 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7336                                                   const Instruction *UserI) {
7337   // The loaded value is unrelated to the pointer argument, no need to
7338   // follow the users of the load.
7339   if (isa<LoadInst>(UserI))
7340     return false;
7341 
  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
7344   const auto *CB = dyn_cast<CallBase>(UserI);
7345   if (!CB || !CB->isArgOperand(&U))
7346     return true;
7347 
  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
7354   if (U.get()->getType()->isPointerTy()) {
7355     unsigned ArgNo = CB->getArgOperandNo(&U);
7356     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7357         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7358     return !ArgNoCaptureAA.isAssumedNoCapture();
7359   }
7360 
7361   return true;
7362 }
7363 
7364 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7365                                             const Instruction *UserI) {
7366   assert(UserI->mayReadOrWriteMemory());
7367 
7368   switch (UserI->getOpcode()) {
7369   default:
7370     // TODO: Handle all atomics and other side-effect operations we know of.
7371     break;
7372   case Instruction::Load:
7373     // Loads cause the NO_READS property to disappear.
7374     removeAssumedBits(NO_READS);
7375     return;
7376 
7377   case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. While capturing was taken care of elsewhere, we still
    // need to handle stores of the tracked value itself, as those are not
    // looked through.
7381     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7382       removeAssumedBits(NO_WRITES);
7383     else
7384       indicatePessimisticFixpoint();
7385     return;
7386 
7387   case Instruction::Call:
7388   case Instruction::CallBr:
7389   case Instruction::Invoke: {
7390     // For call sites we look at the argument memory behavior attribute (this
7391     // could be recursive!) in order to restrict our own state.
7392     const auto *CB = cast<CallBase>(UserI);
7393 
7394     // Give up on operand bundles.
7395     if (CB->isBundleOperand(&U)) {
7396       indicatePessimisticFixpoint();
7397       return;
7398     }
7399 
7400     // Calling a function does read the function pointer, maybe write it if the
7401     // function is self-modifying.
7402     if (CB->isCallee(&U)) {
7403       removeAssumedBits(NO_READS);
7404       break;
7405     }
7406 
7407     // Adjust the possible access behavior based on the information on the
7408     // argument.
7409     IRPosition Pos;
7410     if (U.get()->getType()->isPointerTy())
7411       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7412     else
7413       Pos = IRPosition::callsite_function(*CB);
7414     const auto &MemBehaviorAA =
7415         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7416     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7417     // and at least "known".
7418     intersectAssumedBits(MemBehaviorAA.getAssumed());
7419     return;
7420   }
7421   };
7422 
7423   // Generally, look at the "may-properties" and adjust the assumed state if we
7424   // did not trigger special handling before.
7425   if (UserI->mayReadFromMemory())
7426     removeAssumedBits(NO_READS);
7427   if (UserI->mayWriteToMemory())
7428     removeAssumedBits(NO_WRITES);
7429 }
7430 } // namespace
7431 
7432 /// -------------------- Memory Locations Attributes ---------------------------
7433 /// Includes read-none, argmemonly, inaccessiblememonly,
7434 /// inaccessiblememorargmemonly
7435 /// ----------------------------------------------------------------------------
7436 
7437 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7438     AAMemoryLocation::MemoryLocationsKind MLK) {
7439   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7440     return "all memory";
7441   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7442     return "no memory";
7443   std::string S = "memory:";
7444   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7445     S += "stack,";
7446   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7447     S += "constant,";
7448   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7449     S += "internal global,";
7450   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7451     S += "external global,";
7452   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7453     S += "argument,";
7454   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7455     S += "inaccessible,";
7456   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7457     S += "malloced,";
7458   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7459     S += "unknown,";
7460   S.pop_back();
7461   return S;
7462 }
7463 
7464 namespace {
7465 struct AAMemoryLocationImpl : public AAMemoryLocation {
7466 
7467   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7468       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7469     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7470       AccessKind2Accesses[u] = nullptr;
7471   }
7472 
7473   ~AAMemoryLocationImpl() {
7474     // The AccessSets are allocated via a BumpPtrAllocator, we call
7475     // the destructor manually.
7476     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7477       if (AccessKind2Accesses[u])
7478         AccessKind2Accesses[u]->~AccessSet();
7479   }
7480 
7481   /// See AbstractAttribute::initialize(...).
7482   void initialize(Attributor &A) override {
7483     intersectAssumedBits(BEST_STATE);
7484     getKnownStateFromValue(A, getIRPosition(), getState());
7485     AAMemoryLocation::initialize(A);
7486   }
7487 
7488   /// Return the memory behavior information encoded in the IR for \p IRP.
7489   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7490                                      BitIntegerState &State,
7491                                      bool IgnoreSubsumingPositions = false) {
7492     // For internal functions we ignore `argmemonly` and
7493     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7494     // constant propagation. It is unclear if this is the best way but it is
7495     // unlikely this will cause real performance problems. If we are deriving
7496     // attributes for the anchor function we even remove the attribute in
7497     // addition to ignoring it.
7498     bool UseArgMemOnly = true;
7499     Function *AnchorFn = IRP.getAnchorScope();
7500     if (AnchorFn && A.isRunOn(*AnchorFn))
7501       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7502 
7503     SmallVector<Attribute, 2> Attrs;
7504     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7505     for (const Attribute &Attr : Attrs) {
7506       switch (Attr.getKindAsEnum()) {
7507       case Attribute::ReadNone:
7508         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7509         break;
7510       case Attribute::InaccessibleMemOnly:
7511         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7512         break;
7513       case Attribute::ArgMemOnly:
7514         if (UseArgMemOnly)
7515           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7516         else
7517           IRP.removeAttrs({Attribute::ArgMemOnly});
7518         break;
7519       case Attribute::InaccessibleMemOrArgMemOnly:
7520         if (UseArgMemOnly)
7521           State.addKnownBits(inverseLocation(
7522               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7523         else
7524           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7525         break;
7526       default:
7527         llvm_unreachable("Unexpected attribute!");
7528       }
7529     }
7530   }
7531 
7532   /// See AbstractAttribute::getDeducedAttributes(...).
7533   void getDeducedAttributes(LLVMContext &Ctx,
7534                             SmallVectorImpl<Attribute> &Attrs) const override {
7535     assert(Attrs.size() == 0);
7536     if (isAssumedReadNone()) {
7537       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7538     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7539       if (isAssumedInaccessibleMemOnly())
7540         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7541       else if (isAssumedArgMemOnly())
7542         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7543       else if (isAssumedInaccessibleOrArgMemOnly())
7544         Attrs.push_back(
7545             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7546     }
7547     assert(Attrs.size() <= 1);
7548   }
7549 
7550   /// See AbstractAttribute::manifest(...).
7551   ChangeStatus manifest(Attributor &A) override {
7552     const IRPosition &IRP = getIRPosition();
7553 
7554     // Check if we would improve the existing attributes first.
7555     SmallVector<Attribute, 4> DeducedAttrs;
7556     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7557     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7558           return IRP.hasAttr(Attr.getKindAsEnum(),
7559                              /* IgnoreSubsumingPositions */ true);
7560         }))
7561       return ChangeStatus::UNCHANGED;
7562 
7563     // Clear existing attributes.
7564     IRP.removeAttrs(AttrKinds);
7565     if (isAssumedReadNone())
7566       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7567 
7568     // Use the generic manifest method.
7569     return IRAttribute::manifest(A);
7570   }
7571 
7572   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7573   bool checkForAllAccessesToMemoryKind(
7574       function_ref<bool(const Instruction *, const Value *, AccessKind,
7575                         MemoryLocationsKind)>
7576           Pred,
7577       MemoryLocationsKind RequestedMLK) const override {
7578     if (!isValidState())
7579       return false;
7580 
7581     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7582     if (AssumedMLK == NO_LOCATIONS)
7583       return true;
7584 
7585     unsigned Idx = 0;
7586     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7587          CurMLK *= 2, ++Idx) {
7588       if (CurMLK & RequestedMLK)
7589         continue;
7590 
7591       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7592         for (const AccessInfo &AI : *Accesses)
7593           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7594             return false;
7595     }
7596 
7597     return true;
7598   }
7599 
7600   ChangeStatus indicatePessimisticFixpoint() override {
7601     // If we give up and indicate a pessimistic fixpoint this instruction will
7602     // become an access for all potential access kinds:
7603     // TODO: Add pointers for argmemonly and globals to improve the results of
7604     //       checkForAllAccessesToMemoryKind.
7605     bool Changed = false;
7606     MemoryLocationsKind KnownMLK = getKnown();
7607     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7608     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7609       if (!(CurMLK & KnownMLK))
7610         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7611                                   getAccessKindFromInst(I));
7612     return AAMemoryLocation::indicatePessimisticFixpoint();
7613   }
7614 
7615 protected:
7616   /// Helper struct to tie together an instruction that has a read or write
7617   /// effect with the pointer it accesses (if any).
7618   struct AccessInfo {
7619 
7620     /// The instruction that caused the access.
7621     const Instruction *I;
7622 
7623     /// The base pointer that is accessed, or null if unknown.
7624     const Value *Ptr;
7625 
7626     /// The kind of access (read/write/read+write).
7627     AccessKind Kind;
7628 
7629     bool operator==(const AccessInfo &RHS) const {
7630       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7631     }
7632     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7633       if (LHS.I != RHS.I)
7634         return LHS.I < RHS.I;
7635       if (LHS.Ptr != RHS.Ptr)
7636         return LHS.Ptr < RHS.Ptr;
7637       if (LHS.Kind != RHS.Kind)
7638         return LHS.Kind < RHS.Kind;
7639       return false;
7640     }
7641   };
7642 
7643   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7644   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7645   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7646   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7647 
7648   /// Categorize the pointer arguments of CB that might access memory in
7649   /// AccessedLoc and update the state and access map accordingly.
7650   void
7651   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7652                                      AAMemoryLocation::StateType &AccessedLocs,
7653                                      bool &Changed);
7654 
  /// Return the kind(s) of location that may be accessed by \p I.
7656   AAMemoryLocation::MemoryLocationsKind
7657   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7658 
7659   /// Return the access kind as determined by \p I.
7660   AccessKind getAccessKindFromInst(const Instruction *I) {
7661     AccessKind AK = READ_WRITE;
7662     if (I) {
7663       AK = I->mayReadFromMemory() ? READ : NONE;
7664       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7665     }
7666     return AK;
7667   }
7668 
7669   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7670   /// an access of kind \p AK to a \p MLK memory location with the access
7671   /// pointer \p Ptr.
7672   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7673                                  MemoryLocationsKind MLK, const Instruction *I,
7674                                  const Value *Ptr, bool &Changed,
7675                                  AccessKind AK = READ_WRITE) {
7676 
7677     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7678     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7679     if (!Accesses)
7680       Accesses = new (Allocator) AccessSet();
7681     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7682     State.removeAssumedBits(MLK);
7683   }
7684 
7685   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7686   /// arguments, and update the state and access map accordingly.
7687   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7688                           AAMemoryLocation::StateType &State, bool &Changed);
7689 
7690   /// Used to allocate access sets.
7691   BumpPtrAllocator &Allocator;
7692 
7693   /// The set of IR attributes AAMemoryLocation deals with.
7694   static const Attribute::AttrKind AttrKinds[4];
7695 };
7696 
7697 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7698     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7699     Attribute::InaccessibleMemOrArgMemOnly};
7700 
7701 void AAMemoryLocationImpl::categorizePtrValue(
7702     Attributor &A, const Instruction &I, const Value &Ptr,
7703     AAMemoryLocation::StateType &State, bool &Changed) {
7704   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7705                     << Ptr << " ["
7706                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7707 
7708   SmallSetVector<Value *, 8> Objects;
7709   bool UsedAssumedInformation = false;
7710   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I,
7711                                        UsedAssumedInformation,
7712                                        AA::Intraprocedural)) {
7713     LLVM_DEBUG(
7714         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7715     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7716                               getAccessKindFromInst(&I));
7717     return;
7718   }
7719 
7720   for (Value *Obj : Objects) {
7721     // TODO: recognize the TBAA used for constant accesses.
7722     MemoryLocationsKind MLK = NO_LOCATIONS;
7723     if (isa<UndefValue>(Obj))
7724       continue;
7725     if (isa<Argument>(Obj)) {
      // TODO: For now we do not treat byval arguments as local copies performed
      // on the call edge, though, we should. To make that happen we need to
      // teach various passes, e.g., DSE, about the copy effect of a byval. That
      // would also allow us to mark functions only accessing byval arguments as
      // readnone again; arguably, their accesses have no effect outside of the
      // function, like accesses to allocas.
7732       MLK = NO_ARGUMENT_MEM;
7733     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we won't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
7737       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7738         if (GVar->isConstant())
7739           continue;
7740 
7741       if (GV->hasLocalLinkage())
7742         MLK = NO_GLOBAL_INTERNAL_MEM;
7743       else
7744         MLK = NO_GLOBAL_EXTERNAL_MEM;
7745     } else if (isa<ConstantPointerNull>(Obj) &&
7746                !NullPointerIsDefined(getAssociatedFunction(),
7747                                      Ptr.getType()->getPointerAddressSpace())) {
7748       continue;
7749     } else if (isa<AllocaInst>(Obj)) {
7750       MLK = NO_LOCAL_MEM;
7751     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7752       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7753           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7754       if (NoAliasAA.isAssumedNoAlias())
7755         MLK = NO_MALLOCED_MEM;
7756       else
7757         MLK = NO_UNKOWN_MEM;
7758     } else {
7759       MLK = NO_UNKOWN_MEM;
7760     }
7761 
7762     assert(MLK != NO_LOCATIONS && "No location specified!");
7763     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7764                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7765                       << "\n");
7766     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7767                               getAccessKindFromInst(&I));
7768   }
7769 
7770   LLVM_DEBUG(
7771       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7772              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7773 }
7774 
7775 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7776     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7777     bool &Changed) {
7778   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7779 
7780     // Skip non-pointer arguments.
7781     const Value *ArgOp = CB.getArgOperand(ArgNo);
7782     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7783       continue;
7784 
7785     // Skip readnone arguments.
7786     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7787     const auto &ArgOpMemLocationAA =
7788         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7789 
7790     if (ArgOpMemLocationAA.isAssumedReadNone())
7791       continue;
7792 
7793     // Categorize potentially accessed pointer arguments as if there was an
7794     // access instruction with them as pointer.
7795     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7796   }
7797 }
7798 
7799 AAMemoryLocation::MemoryLocationsKind
7800 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7801                                                   bool &Changed) {
7802   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7803                     << I << "\n");
7804 
7805   AAMemoryLocation::StateType AccessedLocs;
7806   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7807 
7808   if (auto *CB = dyn_cast<CallBase>(&I)) {
7809 
    // First check if we assume any memory access is visible.
7811     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7812         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7813     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7814                       << " [" << CBMemLocationAA << "]\n");
7815 
7816     if (CBMemLocationAA.isAssumedReadNone())
7817       return NO_LOCATIONS;
7818 
7819     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7820       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7821                                 Changed, getAccessKindFromInst(&I));
7822       return AccessedLocs.getAssumed();
7823     }
7824 
7825     uint32_t CBAssumedNotAccessedLocs =
7826         CBMemLocationAA.getAssumedNotAccessedLocation();
7827 
    // Set the argmemonly and global bits as we handle them separately below.
7829     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7830         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7831 
7832     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7833       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7834         continue;
7835       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7836                                 getAccessKindFromInst(&I));
7837     }
7838 
7839     // Now handle global memory if it might be accessed. This is slightly tricky
7840     // as NO_GLOBAL_MEM has multiple bits set.
7841     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7842     if (HasGlobalAccesses) {
7843       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7844                             AccessKind Kind, MemoryLocationsKind MLK) {
7845         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7846                                   getAccessKindFromInst(&I));
7847         return true;
7848       };
7849       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7850               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7851         return AccessedLocs.getWorstState();
7852     }
7853 
7854     LLVM_DEBUG(
7855         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7856                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7857 
7858     // Now handle argument memory if it might be accessed.
7859     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7860     if (HasArgAccesses)
7861       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7862 
7863     LLVM_DEBUG(
7864         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7865                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7866 
7867     return AccessedLocs.getAssumed();
7868   }
7869 
7870   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7871     LLVM_DEBUG(
7872         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7873                << I << " [" << *Ptr << "]\n");
7874     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7875     return AccessedLocs.getAssumed();
7876   }
7877 
7878   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7879                     << I << "\n");
7880   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7881                             getAccessKindFromInst(&I));
7882   return AccessedLocs.getAssumed();
7883 }
7884 
7885 /// An AA to represent the memory behavior function attributes.
7886 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7887   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7888       : AAMemoryLocationImpl(IRP, A) {}
7889 
7890   /// See AbstractAttribute::updateImpl(Attributor &A).
7891   virtual ChangeStatus updateImpl(Attributor &A) override {
7892 
7893     const auto &MemBehaviorAA =
7894         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7895     if (MemBehaviorAA.isAssumedReadNone()) {
7896       if (MemBehaviorAA.isKnownReadNone())
7897         return indicateOptimisticFixpoint();
7898       assert(isAssumedReadNone() &&
7899              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7900       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7901       return ChangeStatus::UNCHANGED;
7902     }
7903 
7904     // The current assumed state used to determine a change.
7905     auto AssumedState = getAssumed();
7906     bool Changed = false;
7907 
7908     auto CheckRWInst = [&](Instruction &I) {
7909       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7910       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7911                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7912       removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // i.e., once we do not actually exclude any memory locations in the
      // state anymore.
7915       return getAssumedNotAccessedLocation() != VALID_STATE;
7916     };
7917 
7918     bool UsedAssumedInformation = false;
7919     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7920                                             UsedAssumedInformation))
7921       return indicatePessimisticFixpoint();
7922 
7923     Changed |= AssumedState != getAssumed();
7924     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7925   }
7926 
7927   /// See AbstractAttribute::trackStatistics()
7928   void trackStatistics() const override {
7929     if (isAssumedReadNone())
7930       STATS_DECLTRACK_FN_ATTR(readnone)
7931     else if (isAssumedArgMemOnly())
7932       STATS_DECLTRACK_FN_ATTR(argmemonly)
7933     else if (isAssumedInaccessibleMemOnly())
7934       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7935     else if (isAssumedInaccessibleOrArgMemOnly())
7936       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7937   }
7938 };
7939 
7940 /// AAMemoryLocation attribute for call sites.
7941 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7942   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7943       : AAMemoryLocationImpl(IRP, A) {}
7944 
7945   /// See AbstractAttribute::initialize(...).
7946   void initialize(Attributor &A) override {
7947     AAMemoryLocationImpl::initialize(A);
7948     Function *F = getAssociatedFunction();
7949     if (!F || F->isDeclaration())
7950       indicatePessimisticFixpoint();
7951   }
7952 
7953   /// See AbstractAttribute::updateImpl(...).
7954   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call sites instead of redirecting
    //       requests to the callee.
7959     Function *F = getAssociatedFunction();
7960     const IRPosition &FnPos = IRPosition::function(*F);
7961     auto &FnAA =
7962         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7963     bool Changed = false;
7964     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7965                           AccessKind Kind, MemoryLocationsKind MLK) {
7966       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7967                                 getAccessKindFromInst(I));
7968       return true;
7969     };
7970     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7971       return indicatePessimisticFixpoint();
7972     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7973   }
7974 
7975   /// See AbstractAttribute::trackStatistics()
7976   void trackStatistics() const override {
7977     if (isAssumedReadNone())
7978       STATS_DECLTRACK_CS_ATTR(readnone)
7979   }
7980 };
7981 } // namespace

/// ------------------ Value Constant Range Attribute -------------------------

namespace {
struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (A.hasSimplificationCallback(getIRPosition())) {
      indicatePessimisticFixpoint();
      return;
    }

    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI));
  }

  /// Return true if \p CtxI is valid for querying outside analyses.
  /// This basically makes sure we do not ask intra-procedural analyses
  /// about a context in the wrong function or a context that violates
  /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
  /// if the original context of this AA is OK or should be considered invalid.
  bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
                                               const Instruction *CtxI,
                                               bool AllowAACtxI) const {
    if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
      return false;

    // Our context might be in a different function; neither of the
    // intra-procedural analyses we use (ScalarEvolution and LazyValueInfo)
    // can handle that.
    if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
      return false;

    // If the context is not dominated by the value, there are paths to the
    // context that do not define the value. This cannot be handled by
    // LazyValueInfo so we need to bail.
    if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
      InformationCache &InfoCache = A.getInfoCache();
      const DominatorTree *DT =
          InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
              *I->getFunction());
      return DT && DT->dominates(I, CtxI);
    }

    return true;
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. E.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
    if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
                                                 /* AllowAACtxI */ false))
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }
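  // For illustration: assuming an assumed range of [0, 10) on an i32 value,
  // the helper above produces a node that prints as !{i32 0, i32 10}. When
  // manifested, the result looks like
  //   %x = load i32, i32* %p, !range !0
  //   !0 = !{i32 0, i32 10}
  // Note that range metadata describes a half-open interval [Lower, Upper).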

  /// Return true if \p Assumed is a strictly better (smaller) range than the
  /// one described by the existing \p KnownRanges metadata.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in the IR, we give up on annotating
    // the assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }
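  // For illustration: given known metadata !{i32 0, i32 100}, i.e. [0, 100),
  // an assumed range of [10, 20) is strictly contained and thus "better",
  // while an assumed range equal to [0, 100) is not.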

  /// Helper function to set range metadata.
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V)) {
        assert(I == getCtxI() && "Should not annotate an instruction which is "
                                 "not the context instruction");
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed;
  }
};

struct AAValueConstantRangeArgument final
    : AAArgumentFromCallSiteArguments<
          AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
          true /* BridgeCallBaseContext */> {
  using Base = AAArgumentFromCallSiteArguments<
      AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
      true /* BridgeCallBaseContext */>;
  AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_range)
  }
};

struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
  using Base =
      AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl,
                                   AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<CallBase>(&V))
      return;

    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  bool calculateBinaryOperator(
      Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);

    // Simplify the operands first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS = A.getAssumedSimplified(
        IRPosition::value(*LHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedLHS.hasValue())
      return true;
    if (!SimplifiedLHS.getValue())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS = A.getAssumedSimplified(
        IRPosition::value(*RHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedRHS.hasValue())
      return true;
    if (!SimplifiedRHS.getValue())
      return false;
    RHS = *SimplifiedRHS;

    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*LHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QueriedAAs.push_back(&LHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*RHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QueriedAAs.push_back(&RHSAA);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }
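  // For illustration: if LHS is assumed to be in [1, 3), i.e. {1, 2}, and RHS
  // in [10, 12), i.e. {10, 11}, then for an `add` the assumed range unioned
  // into T is [11, 14), i.e. {11, 12, 13}.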

  bool calculateCastInst(
      Attributor &A, CastInst *CastI, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non integers as well.
    Value *OpV = CastI->getOperand(0);

    // Simplify the operand first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedOpV = A.getAssumedSimplified(
        IRPosition::value(*OpV, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedOpV.hasValue())
      return true;
    if (!SimplifiedOpV.getValue())
      return false;
    OpV = *SimplifiedOpV;

    if (!OpV->getType()->isIntegerTy())
      return false;

    auto &OpAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*OpV, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QueriedAAs.push_back(&OpAA);
    T.unionAssumed(
        OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
    return T.isValidState();
  }
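  // For illustration: for `zext i8 %x to i32` with an assumed operand range
  // of [0, 10) over 8 bits, the castOp above yields [0, 10) over 32 bits.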

  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   const Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);

    // Simplify the operands first.
    bool UsedAssumedInformation = false;
    const auto &SimplifiedLHS = A.getAssumedSimplified(
        IRPosition::value(*LHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedLHS.hasValue())
      return true;
    if (!SimplifiedLHS.getValue())
      return false;
    LHS = *SimplifiedLHS;

    const auto &SimplifiedRHS = A.getAssumedSimplified(
        IRPosition::value(*RHS, getCallBaseContext()), *this,
        UsedAssumedInformation, AA::Interprocedural);
    if (!SimplifiedRHS.hasValue())
      return true;
    if (!SimplifiedRHS.getValue())
      return false;
    RHS = *SimplifiedRHS;

    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*LHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QueriedAAs.push_back(&LHSAA);
    auto &RHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*RHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QueriedAAs.push_back(&RHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is the empty set, we cannot decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }
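  // For illustration: with LHS assumed in [0, 5) and RHS in [10, 20), an
  // `icmp ult` must be true and the assumed range becomes the single element
  // {1}; if the allowed region and the LHS range were instead disjoint, it
  // would become {0}.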

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    IntegerRangeState T(getBitWidth());
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // Simplify the operand first.
        bool UsedAssumedInformation = false;
        const auto &SimplifiedOpV = A.getAssumedSimplified(
            IRPosition::value(V, getCallBaseContext()), *this,
            UsedAssumedInformation, AA::Interprocedural);
        if (!SimplifiedOpV.hasValue())
          return true;
        if (!SimplifiedOpV.getValue())
          return false;
        Value *VPtr = *SimplifiedOpV;

        // If the value is not an instruction, we query the Attributor for the
        // AA of the simplified value.
        const auto &AA = A.getAAFor<AAValueConstantRange>(
            *this, IRPosition::value(*VPtr, getCallBaseContext()),
            DepClassTy::REQUIRED);

        // The clamp operator is not used here so that we can utilize the
        // range at the program point CtxI.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QueriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    if (!VisitValueCB(getAssociatedValue(), getCtxI()))
      return indicatePessimisticFixpoint();

    // Ensure that long def-use chains cannot cause circular reasoning either,
    // by means of the cutoff below.
    if (clampStateAndIndicateChange(getState(), T) == ChangeStatus::UNCHANGED)
      return ChangeStatus::UNCHANGED;
    if (++NumChanges > MaxNumChanges) {
      LLVM_DEBUG(dbgs() << "[AAValueConstantRange] performed " << NumChanges
                        << " changes, but only " << MaxNumChanges
                        << " are allowed to avoid cyclic reasoning.\n");
      return indicatePessimisticFixpoint();
    }
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }

  /// Tracker to bail after too many widening steps of the constant range.
  int NumChanges = 0;

  /// Upper bound for the number of allowed changes (=widening steps) for the
  /// constant range before we give up.
  static constexpr int MaxNumChanges = 5;
};

struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
  AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
};

struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
  AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
};

struct AAValueConstantRangeCallSiteReturned
    : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                     AAValueConstantRangeImpl,
                                     AAValueConstantRangeImpl::StateType,
                                     /* IntroduceCallBaseContext */ true> {
  AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                       AAValueConstantRangeImpl,
                                       AAValueConstantRangeImpl::StateType,
                                       /* IntroduceCallBaseContext */ true>(IRP,
                                                                            A) {
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
    if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
      if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));

    AAValueConstantRangeImpl::initialize(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeFloating(IRP, A) {}

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};
} // namespace

/// ------------------ Potential Values Attribute -------------------------

namespace {
struct AAPotentialConstantValuesImpl : AAPotentialConstantValues {
  using StateType = PotentialConstantIntValuesState;

  AAPotentialConstantValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAPotentialConstantValues(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (A.hasSimplificationCallback(getIRPosition()))
      indicatePessimisticFixpoint();
    else
      AAPotentialConstantValues::initialize(A);
  }

  bool fillSetWithConstantValues(Attributor &A, const IRPosition &IRP, SetTy &S,
                                 bool &ContainsUndef) {
    SmallVector<AA::ValueAndContext> Values;
    bool UsedAssumedInformation = false;
    if (!A.getAssumedSimplifiedValues(IRP, *this, Values, AA::Interprocedural,
                                      UsedAssumedInformation)) {
      if (!IRP.getAssociatedType()->isIntegerTy())
        return false;
      auto &PotentialValuesAA = A.getAAFor<AAPotentialConstantValues>(
          *this, IRP, DepClassTy::REQUIRED);
      if (!PotentialValuesAA.getState().isValidState())
        return false;
      ContainsUndef = PotentialValuesAA.getState().undefIsContained();
      S = PotentialValuesAA.getState().getAssumedSet();
      return true;
    }

    for (auto &It : Values) {
      if (isa<UndefValue>(It.getValue()))
        continue;
      auto *CI = dyn_cast<ConstantInt>(It.getValue());
      if (!CI)
        return false;
      S.insert(CI->getValue());
    }
    // If we only saw undef values, the set is empty; record that instead.
    ContainsUndef = S.empty();

    return true;
  }
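  // For illustration: if the simplified values for \p IRP are the constants
  // 1 and 7, S becomes {1, 7}; if every simplified value is `undef`, S stays
  // empty and ContainsUndef is set instead.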

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << getState();
    return OS.str();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
};

struct AAPotentialConstantValuesArgument final
    : AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
                                      AAPotentialConstantValuesImpl,
                                      PotentialConstantIntValuesState> {
  using Base = AAArgumentFromCallSiteArguments<AAPotentialConstantValues,
                                               AAPotentialConstantValuesImpl,
                                               PotentialConstantIntValuesState>;
  AAPotentialConstantValuesArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesReturned
    : AAReturnedFromReturnedValues<AAPotentialConstantValues,
                                   AAPotentialConstantValuesImpl> {
  using Base = AAReturnedFromReturnedValues<AAPotentialConstantValues,
                                            AAPotentialConstantValuesImpl>;
  AAPotentialConstantValuesReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesFloating : AAPotentialConstantValuesImpl {
  AAPotentialConstantValuesFloating(const IRPosition &IRP, Attributor &A)
      : AAPotentialConstantValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    AAPotentialConstantValuesImpl::initialize(A);
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(C->getValue());
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      unionAssumedWithUndef();
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
      return;

    if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
      return;

    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAPotentialConstantValues] We give up: "
                      << getAssociatedValue() << "\n");
  }

  static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
                                const APInt &RHS) {
    return ICmpInst::compare(LHS, RHS, ICI->getPredicate());
  }

  static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
                                 uint32_t ResultBitWidth) {
    Instruction::CastOps CastOp = CI->getOpcode();
    switch (CastOp) {
    default:
      llvm_unreachable("unsupported or not integer cast");
    case Instruction::Trunc:
      return Src.trunc(ResultBitWidth);
    case Instruction::SExt:
      return Src.sext(ResultBitWidth);
    case Instruction::ZExt:
      return Src.zext(ResultBitWidth);
    case Instruction::BitCast:
      return Src;
    }
  }
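  // For illustration: `trunc i32 300 to i8` yields 300 mod 256 = 44, and
  // `sext i8 -1 to i32` yields -1; the APInt helpers above implement exactly
  // these conversions.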

  static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
                                       const APInt &LHS, const APInt &RHS,
                                       bool &SkipOperation, bool &Unsupported) {
    Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
    // Unsupported is set to true when the binary operator is not supported.
    // SkipOperation is set to true when UB occurs with the given operand pair
    // (LHS, RHS).
    // TODO: we should look at nsw and nuw keywords to handle operations
    //       that create poison or undef values.
    switch (BinOpcode) {
    default:
      Unsupported = true;
      return LHS;
    case Instruction::Add:
      return LHS + RHS;
    case Instruction::Sub:
      return LHS - RHS;
    case Instruction::Mul:
      return LHS * RHS;
    case Instruction::UDiv:
      if (RHS.isZero()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.udiv(RHS);
    case Instruction::SDiv:
      if (RHS.isZero()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.sdiv(RHS);
    case Instruction::URem:
      if (RHS.isZero()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.urem(RHS);
    case Instruction::SRem:
      if (RHS.isZero()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.srem(RHS);
    case Instruction::Shl:
      return LHS.shl(RHS);
    case Instruction::LShr:
      return LHS.lshr(RHS);
    case Instruction::AShr:
      return LHS.ashr(RHS);
    case Instruction::And:
      return LHS & RHS;
    case Instruction::Or:
      return LHS | RHS;
    case Instruction::Xor:
      return LHS ^ RHS;
    }
  }
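  // For illustration: a `udiv` with RHS == 0 would be UB, so SkipOperation is
  // set and that pair contributes nothing; e.g., LHS = {4, 8} and RHS = {0, 2}
  // adds only 2 and 4 to the assumed set.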

  bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
                                           const APInt &LHS, const APInt &RHS) {
    bool SkipOperation = false;
    bool Unsupported = false;
    APInt Result =
        calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
    if (Unsupported)
      return false;
    // If SkipOperation is true, we can ignore this operand pair (L, R).
    if (!SkipOperation)
      unionAssumed(Result);
    return isValidState();
  }

  ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
    auto AssumedBefore = getAssumed();
    Value *LHS = ICI->getOperand(0);
    Value *RHS = ICI->getOperand(1);

    bool LHSContainsUndef = false, RHSContainsUndef = false;
    SetTy LHSAAPVS, RHSAAPVS;
    if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
                                   LHSContainsUndef) ||
        !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
                                   RHSContainsUndef))
      return indicatePessimisticFixpoint();

    // TODO: make use of undef flag to limit potential values aggressively.
    bool MaybeTrue = false, MaybeFalse = false;
    const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
    if (LHSContainsUndef && RHSContainsUndef) {
      // The result of any comparison between undefs can be soundly replaced
      // with undef.
      unionAssumedWithUndef();
    } else if (LHSContainsUndef) {
      for (const APInt &R : RHSAAPVS) {
        bool CmpResult = calculateICmpInst(ICI, Zero, R);
        MaybeTrue |= CmpResult;
        MaybeFalse |= !CmpResult;
        if (MaybeTrue & MaybeFalse)
          return indicatePessimisticFixpoint();
      }
    } else if (RHSContainsUndef) {
      for (const APInt &L : LHSAAPVS) {
        bool CmpResult = calculateICmpInst(ICI, L, Zero);
        MaybeTrue |= CmpResult;
        MaybeFalse |= !CmpResult;
        if (MaybeTrue & MaybeFalse)
          return indicatePessimisticFixpoint();
      }
    } else {
      for (const APInt &L : LHSAAPVS) {
        for (const APInt &R : RHSAAPVS) {
          bool CmpResult = calculateICmpInst(ICI, L, R);
          MaybeTrue |= CmpResult;
          MaybeFalse |= !CmpResult;
          if (MaybeTrue & MaybeFalse)
            return indicatePessimisticFixpoint();
        }
      }
    }
    if (MaybeTrue)
      unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
    if (MaybeFalse)
      unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }
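  // For illustration: with LHS = {3} and RHS = {4, 5}, `icmp eq` is false for
  // every pair, so only 0 is added to the assumed set; with RHS = {3, 5} both
  // outcomes are possible and we fall back to the pessimistic fixpoint.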

  ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
    auto AssumedBefore = getAssumed();
    Value *LHS = SI->getTrueValue();
    Value *RHS = SI->getFalseValue();

    bool UsedAssumedInformation = false;
    Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
                                                  UsedAssumedInformation);

    // Check if we only need one operand.
    bool OnlyLeft = false, OnlyRight = false;
    if (C && *C && (*C)->isOneValue())
      OnlyLeft = true;
    else if (C && *C && (*C)->isZeroValue())
      OnlyRight = true;

    bool LHSContainsUndef = false, RHSContainsUndef = false;
    SetTy LHSAAPVS, RHSAAPVS;
    if (!OnlyRight && !fillSetWithConstantValues(A, IRPosition::value(*LHS),
                                                 LHSAAPVS, LHSContainsUndef))
      return indicatePessimisticFixpoint();

    if (!OnlyLeft && !fillSetWithConstantValues(A, IRPosition::value(*RHS),
                                                RHSAAPVS, RHSContainsUndef))
      return indicatePessimisticFixpoint();

    if (OnlyLeft || OnlyRight) {
      // select (true/false), lhs, rhs
      auto *OpAA = OnlyLeft ? &LHSAAPVS : &RHSAAPVS;
      auto Undef = OnlyLeft ? LHSContainsUndef : RHSContainsUndef;

      if (Undef)
        unionAssumedWithUndef();
      else {
        for (auto &It : *OpAA)
          unionAssumed(It);
      }

    } else if (LHSContainsUndef && RHSContainsUndef) {
      // select i1 *, undef, undef => undef
      unionAssumedWithUndef();
    } else {
      for (auto &It : LHSAAPVS)
        unionAssumed(It);
      for (auto &It : RHSAAPVS)
        unionAssumed(It);
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }
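  // For illustration: for `select i1 %c, i32 1, i32 2` with %c simplified to
  // true, only the true-value set {1} is unioned; if %c is unknown, the union
  // of both operand sets, {1, 2}, is taken.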

  ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
    auto AssumedBefore = getAssumed();
    if (!CI->isIntegerCast())
      return indicatePessimisticFixpoint();
    assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
    uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
    Value *Src = CI->getOperand(0);

    bool SrcContainsUndef = false;
    SetTy SrcPVS;
    if (!fillSetWithConstantValues(A, IRPosition::value(*Src), SrcPVS,
                                   SrcContainsUndef))
      return indicatePessimisticFixpoint();

    if (SrcContainsUndef)
      unionAssumedWithUndef();
    else {
      for (const APInt &S : SrcPVS) {
        APInt T = calculateCastInst(CI, S, ResultBitWidth);
        unionAssumed(T);
      }
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
    auto AssumedBefore = getAssumed();
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);

    bool LHSContainsUndef = false, RHSContainsUndef = false;
    SetTy LHSAAPVS, RHSAAPVS;
    if (!fillSetWithConstantValues(A, IRPosition::value(*LHS), LHSAAPVS,
                                   LHSContainsUndef) ||
        !fillSetWithConstantValues(A, IRPosition::value(*RHS), RHSAAPVS,
                                   RHSContainsUndef))
      return indicatePessimisticFixpoint();

    const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);

    // TODO: make use of undef flag to limit potential values aggressively.
    if (LHSContainsUndef && RHSContainsUndef) {
      if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
        return indicatePessimisticFixpoint();
    } else if (LHSContainsUndef) {
      for (const APInt &R : RHSAAPVS) {
        if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
          return indicatePessimisticFixpoint();
      }
    } else if (RHSContainsUndef) {
      for (const APInt &L : LHSAAPVS) {
        if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
          return indicatePessimisticFixpoint();
      }
    } else {
      for (const APInt &L : LHSAAPVS) {
        for (const APInt &R : RHSAAPVS) {
          if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
            return indicatePessimisticFixpoint();
        }
      }
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    // initialize() indicated a fixpoint for non-instruction values, so \p V
    // is an instruction here.
    Instruction *I = dyn_cast<Instruction>(&V);

    if (auto *ICI = dyn_cast<ICmpInst>(I))
      return updateWithICmpInst(A, ICI);

    if (auto *SI = dyn_cast<SelectInst>(I))
      return updateWithSelectInst(A, SI);

    if (auto *CI = dyn_cast<CastInst>(I))
      return updateWithCastInst(A, CI);

    if (auto *BinOp = dyn_cast<BinaryOperator>(I))
      return updateWithBinaryOperator(A, BinOp);

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesFunction : AAPotentialConstantValuesImpl {
  AAPotentialConstantValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAPotentialConstantValuesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable(
        "AAPotentialConstantValues(Function|CallSite)::updateImpl will "
        "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesCallSite : AAPotentialConstantValuesFunction {
  AAPotentialConstantValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAPotentialConstantValuesFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesCallSiteReturned
    : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
                                     AAPotentialConstantValuesImpl> {
  AAPotentialConstantValuesCallSiteReturned(const IRPosition &IRP,
                                            Attributor &A)
      : AACallSiteReturnedFromReturned<AAPotentialConstantValues,
                                       AAPotentialConstantValuesImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(potential_values)
  }
};

struct AAPotentialConstantValuesCallSiteArgument
    : AAPotentialConstantValuesFloating {
  AAPotentialConstantValuesCallSiteArgument(const IRPosition &IRP,
                                            Attributor &A)
      : AAPotentialConstantValuesFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    AAPotentialConstantValuesImpl::initialize(A);
    if (isAtFixpoint())
      return;

    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(C->getValue());
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      unionAssumedWithUndef();
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    auto AssumedBefore = getAssumed();
    auto &AA = A.getAAFor<AAPotentialConstantValues>(
        *this, IRPosition::value(V), DepClassTy::REQUIRED);
    const auto &S = AA.getAssumed();
    unionAssumed(S);
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track use for instructions which must produce undef or poison bits when
    // at least one operand contains such bits.
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }
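  // For illustration: in
  //   %p = getelementptr i32, i32* %x, i64 1
  // a poison %x makes %p poison as well, so the use is tracked and contexts
  // in which %p must not be undef or poison also constrain %x.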

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef
    // values.
    bool UsedAssumedInformation = false;
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
                        UsedAssumedInformation))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value could not be determined is considered
    // to be dead as well. We don't manifest noundef in such positions for the
    // same reason as above.
    if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation,
                                AA::Interprocedural)
             .hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    SmallVector<AA::ValueAndContext> Values;
    bool UsedAssumedInformation = false;
    if (!A.getAssumedSimplifiedValues(getIRPosition(), *this, Values,
                                      AA::AnyScope, UsedAssumedInformation)) {
      Values.push_back({getAssociatedValue(), getCtxI()});
    }

    StateType T;
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    for (const auto &VAC : Values)
      if (!VisitValueCB(*VAC.getValue(), VAC.getCtxI()))
        return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};

struct AACallEdgesImpl : public AACallEdges {
  AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}

  const SetVector<Function *> &getOptimisticEdges() const override {
    return CalledFunctions;
  }

  bool hasUnknownCallee() const override { return HasUnknownCallee; }

  bool hasNonAsmUnknownCallee() const override {
    return HasUnknownCalleeNonAsm;
  }

  const std::string getAsStr() const override {
    return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
           std::to_string(CalledFunctions.size()) + "]";
  }

  void trackStatistics() const override {}

protected:
  void addCalledFunction(Function *Fn, ChangeStatus &Change) {
    if (CalledFunctions.insert(Fn)) {
      Change = ChangeStatus::CHANGED;
      LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
                        << "\n");
    }
  }

  void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
    if (!HasUnknownCallee)
      Change = ChangeStatus::CHANGED;
    if (NonAsm && !HasUnknownCalleeNonAsm)
      Change = ChangeStatus::CHANGED;
    HasUnknownCalleeNonAsm |= NonAsm;
    HasUnknownCallee = true;
  }

private:
  /// Optimistic set of functions that might be called by this position.
  SetVector<Function *> CalledFunctions;

  /// Is there any call with an unknown callee.
  bool HasUnknownCallee = false;

  /// Is there any call with an unknown callee, excluding any inline asm.
  bool HasUnknownCalleeNonAsm = false;
};
9334 
9335 struct AACallEdgesCallSite : public AACallEdgesImpl {
9336   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9337       : AACallEdgesImpl(IRP, A) {}
9338   /// See AbstractAttribute::updateImpl(...).
9339   ChangeStatus updateImpl(Attributor &A) override {
9340     ChangeStatus Change = ChangeStatus::UNCHANGED;
9341 
9342     auto VisitValue = [&](Value &V, const Instruction *CtxI) -> bool {
9343       if (Function *Fn = dyn_cast<Function>(&V)) {
9344         addCalledFunction(Fn, Change);
9345       } else {
9346         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9347         setHasUnknownCallee(true, Change);
9348       }
9349 
9350       // Explore all values.
9351       return true;
9352     };
9353 
9354     SmallVector<AA::ValueAndContext> Values;
9355     // Process any value that we might call.
9356     auto ProcessCalledOperand = [&](Value *V, Instruction *CtxI) {
9357       bool UsedAssumedInformation = false;
9358       Values.clear();
9359       if (!A.getAssumedSimplifiedValues(IRPosition::value(*V), *this, Values,
9360                                         AA::AnyScope, UsedAssumedInformation)) {
9361         Values.push_back({*V, CtxI});
9362       }
9363       for (auto &VAC : Values)
9364         VisitValue(*VAC.getValue(), VAC.getCtxI());
9365     };
9366 
9367     CallBase *CB = cast<CallBase>(getCtxI());
9368 
9369     if (CB->isInlineAsm()) {
9370       if (!hasAssumption(*CB->getCaller(), "ompx_no_call_asm") &&
9371           !hasAssumption(*CB, "ompx_no_call_asm"))
9372         setHasUnknownCallee(false, Change);
9373       return Change;
9374     }
9375 
9376     // Process callee metadata if available.
9377     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9378       for (auto &Op : MD->operands()) {
9379         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9380         if (Callee)
9381           addCalledFunction(Callee, Change);
9382       }
9383       return Change;
9384     }
9385 
    // The simplest case: the called operand itself.
9387     ProcessCalledOperand(CB->getCalledOperand(), CB);
9388 
9389     // Process callback functions.
9390     SmallVector<const Use *, 4u> CallbackUses;
9391     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9392     for (const Use *U : CallbackUses)
9393       ProcessCalledOperand(U->get(), CB);
9394 
9395     return Change;
9396   }
9397 };
9398 
9399 struct AACallEdgesFunction : public AACallEdgesImpl {
9400   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9401       : AACallEdgesImpl(IRP, A) {}
9402 
9403   /// See AbstractAttribute::updateImpl(...).
9404   ChangeStatus updateImpl(Attributor &A) override {
9405     ChangeStatus Change = ChangeStatus::UNCHANGED;
9406 
9407     auto ProcessCallInst = [&](Instruction &Inst) {
9408       CallBase &CB = cast<CallBase>(Inst);
9409 
9410       auto &CBEdges = A.getAAFor<AACallEdges>(
9411           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9412       if (CBEdges.hasNonAsmUnknownCallee())
9413         setHasUnknownCallee(true, Change);
9414       if (CBEdges.hasUnknownCallee())
9415         setHasUnknownCallee(false, Change);
9416 
9417       for (Function *F : CBEdges.getOptimisticEdges())
9418         addCalledFunction(F, Change);
9419 
9420       return true;
9421     };
9422 
9423     // Visit all callable instructions.
9424     bool UsedAssumedInformation = false;
9425     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9426                                            UsedAssumedInformation,
9427                                            /* CheckBBLivenessOnly */ true)) {
      // If we haven't looked at all call-like instructions, assume that there
9429       // are unknown callees.
9430       setHasUnknownCallee(true, Change);
9431     }
9432 
9433     return Change;
9434   }
9435 };
9436 
9437 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9438 private:
9439   struct QuerySet {
9440     void markReachable(const Function &Fn) {
9441       Reachable.insert(&Fn);
9442       Unreachable.erase(&Fn);
9443     }
9444 
    /// If there is no information about the function, None is returned.
9446     Optional<bool> isCachedReachable(const Function &Fn) {
9447       // Assume that we can reach the function.
9448       // TODO: Be more specific with the unknown callee.
9449       if (CanReachUnknownCallee)
9450         return true;
9451 
9452       if (Reachable.count(&Fn))
9453         return true;
9454 
9455       if (Unreachable.count(&Fn))
9456         return false;
9457 
9458       return llvm::None;
9459     }
9460 
    /// Set of functions that we know for sure are reachable.
9462     DenseSet<const Function *> Reachable;
9463 
9464     /// Set of functions that are unreachable, but might become reachable.
9465     DenseSet<const Function *> Unreachable;
9466 
    /// If we can reach a function with a call to an unknown function, we
    /// assume that we can reach any function.
9469     bool CanReachUnknownCallee = false;
9470   };
9471 
9472   struct QueryResolver : public QuerySet {
9473     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9474                         ArrayRef<const AACallEdges *> AAEdgesList) {
9475       ChangeStatus Change = ChangeStatus::UNCHANGED;
9476 
9477       for (auto *AAEdges : AAEdgesList) {
9478         if (AAEdges->hasUnknownCallee()) {
9479           if (!CanReachUnknownCallee)
9480             Change = ChangeStatus::CHANGED;
9481           CanReachUnknownCallee = true;
9482           return Change;
9483         }
9484       }
9485 
9486       for (const Function *Fn : make_early_inc_range(Unreachable)) {
9487         if (checkIfReachable(A, AA, AAEdgesList, *Fn)) {
9488           Change = ChangeStatus::CHANGED;
9489           markReachable(*Fn);
9490         }
9491       }
9492       return Change;
9493     }
9494 
9495     bool isReachable(Attributor &A, AAFunctionReachability &AA,
9496                      ArrayRef<const AACallEdges *> AAEdgesList,
9497                      const Function &Fn) {
9498       Optional<bool> Cached = isCachedReachable(Fn);
9499       if (Cached)
9500         return Cached.getValue();
9501 
      // The query was not cached, thus it is new. We need to request an update
      // explicitly to make sure the information is properly run to a fixpoint.
9505       A.registerForUpdate(AA);
9506 
9507       // We need to assume that this function can't reach Fn to prevent
9508       // an infinite loop if this function is recursive.
9509       Unreachable.insert(&Fn);
9510 
9511       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9512       if (Result)
9513         markReachable(Fn);
9514       return Result;
9515     }
9516 
9517     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9518                           ArrayRef<const AACallEdges *> AAEdgesList,
9519                           const Function &Fn) const {
9520 
9521       // Handle the most trivial case first.
9522       for (auto *AAEdges : AAEdgesList) {
9523         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9524 
9525         if (Edges.count(const_cast<Function *>(&Fn)))
9526           return true;
9527       }
9528 
9529       SmallVector<const AAFunctionReachability *, 8> Deps;
9530       for (auto &AAEdges : AAEdgesList) {
9531         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9532 
9533         for (Function *Edge : Edges) {
9534           // Functions that do not call back into the module can be ignored.
9535           if (Edge->hasFnAttribute(Attribute::NoCallback))
9536             continue;
9537 
9538           // We don't need a dependency if the result is reachable.
9539           const AAFunctionReachability &EdgeReachability =
9540               A.getAAFor<AAFunctionReachability>(
9541                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9542           Deps.push_back(&EdgeReachability);
9543 
9544           if (EdgeReachability.canReach(A, Fn))
9545             return true;
9546         }
9547       }
9548 
9549       // The result is false for now, set dependencies and leave.
9550       for (auto *Dep : Deps)
9551         A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
9552 
9553       return false;
9554     }
9555   };
9556 
9557   /// Get call edges that can be reached by this instruction.
9558   bool getReachableCallEdges(Attributor &A, const AAReachability &Reachability,
9559                              const Instruction &Inst,
9560                              SmallVector<const AACallEdges *> &Result) const {
    // Determine the call-like instructions that we can reach from Inst.
9562     auto CheckCallBase = [&](Instruction &CBInst) {
9563       if (!Reachability.isAssumedReachable(A, Inst, CBInst))
9564         return true;
9565 
9566       auto &CB = cast<CallBase>(CBInst);
9567       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9568           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9569 
9570       Result.push_back(&AAEdges);
9571       return true;
9572     };
9573 
9574     bool UsedAssumedInformation = false;
9575     return A.checkForAllCallLikeInstructions(CheckCallBase, *this,
9576                                              UsedAssumedInformation,
9577                                              /* CheckBBLivenessOnly */ true);
9578   }
9579 
9580 public:
9581   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9582       : AAFunctionReachability(IRP, A) {}
9583 
9584   bool canReach(Attributor &A, const Function &Fn) const override {
9585     if (!isValidState())
9586       return true;
9587 
9588     const AACallEdges &AAEdges =
9589         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9590 
9591     // Attributor returns attributes as const, so this function has to be
9592     // const for users of this attribute to use it without having to do
9593     // a const_cast.
9594     // This is a hack for us to be able to cache queries.
9595     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9596     bool Result = NonConstThis->WholeFunction.isReachable(A, *NonConstThis,
9597                                                           {&AAEdges}, Fn);
9598 
9599     return Result;
9600   }
9601 
  /// Can \p CB reach \p Fn?
9603   bool canReach(Attributor &A, CallBase &CB,
9604                 const Function &Fn) const override {
9605     if (!isValidState())
9606       return true;
9607 
9608     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9609         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9610 
9611     // Attributor returns attributes as const, so this function has to be
9612     // const for users of this attribute to use it without having to do
9613     // a const_cast.
9614     // This is a hack for us to be able to cache queries.
9615     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9616     QueryResolver &CBQuery = NonConstThis->CBQueries[&CB];
9617 
9618     bool Result = CBQuery.isReachable(A, *NonConstThis, {&AAEdges}, Fn);
9619 
9620     return Result;
9621   }
9622 
9623   bool instructionCanReach(Attributor &A, const Instruction &Inst,
9624                            const Function &Fn,
9625                            bool UseBackwards) const override {
9626     if (!isValidState())
9627       return true;
9628 
9629     if (UseBackwards)
9630       return AA::isPotentiallyReachable(A, Inst, Fn, *this, nullptr);
9631 
9632     const auto &Reachability = A.getAAFor<AAReachability>(
9633         *this, IRPosition::function(*getAssociatedFunction()),
9634         DepClassTy::REQUIRED);
9635 
9636     SmallVector<const AACallEdges *> CallEdges;
9637     bool AllKnown = getReachableCallEdges(A, Reachability, Inst, CallEdges);
9638     // Attributor returns attributes as const, so this function has to be
9639     // const for users of this attribute to use it without having to do
9640     // a const_cast.
9641     // This is a hack for us to be able to cache queries.
9642     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9643     QueryResolver &InstQSet = NonConstThis->InstQueries[&Inst];
9644     if (!AllKnown)
9645       InstQSet.CanReachUnknownCallee = true;
9646 
9647     return InstQSet.isReachable(A, *NonConstThis, CallEdges, Fn);
9648   }
9649 
9650   /// See AbstractAttribute::updateImpl(...).
9651   ChangeStatus updateImpl(Attributor &A) override {
9652     const AACallEdges &AAEdges =
9653         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9654     ChangeStatus Change = ChangeStatus::UNCHANGED;
9655 
9656     Change |= WholeFunction.update(A, *this, {&AAEdges});
9657 
9658     for (auto &CBPair : CBQueries) {
9659       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9660           *this, IRPosition::callsite_function(*CBPair.first),
9661           DepClassTy::REQUIRED);
9662 
9663       Change |= CBPair.second.update(A, *this, {&AAEdges});
9664     }
9665 
9666     // Update the Instruction queries.
9667     if (!InstQueries.empty()) {
9668       const AAReachability *Reachability = &A.getAAFor<AAReachability>(
9669           *this, IRPosition::function(*getAssociatedFunction()),
9670           DepClassTy::REQUIRED);
9671 
9672       // Check for local callbases first.
9673       for (auto &InstPair : InstQueries) {
9674         SmallVector<const AACallEdges *> CallEdges;
9675         bool AllKnown =
9676             getReachableCallEdges(A, *Reachability, *InstPair.first, CallEdges);
        // Update will return CHANGED if this affects any queries.
9678         if (!AllKnown)
9679           InstPair.second.CanReachUnknownCallee = true;
9680         Change |= InstPair.second.update(A, *this, CallEdges);
9681       }
9682     }
9683 
9684     return Change;
9685   }
9686 
9687   const std::string getAsStr() const override {
9688     size_t QueryCount =
9689         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9690 
9691     return "FunctionReachability [" +
9692            std::to_string(WholeFunction.Reachable.size()) + "," +
9693            std::to_string(QueryCount) + "]";
9694   }
9695 
9696   void trackStatistics() const override {}
9697 
9698 private:
9699   bool canReachUnknownCallee() const override {
9700     return WholeFunction.CanReachUnknownCallee;
9701   }
9702 
  /// Used to answer if the whole function can reach a specific function.
9704   QueryResolver WholeFunction;
9705 
9706   /// Used to answer if a call base inside this function can reach a specific
9707   /// function.
9708   MapVector<const CallBase *, QueryResolver> CBQueries;
9709 
  /// This is for instruction queries that scan "forward".
9711   MapVector<const Instruction *, QueryResolver> InstQueries;
9712 };
9713 } // namespace
9714 
9715 template <typename AAType>
9716 static Optional<Constant *>
9717 askForAssumedConstant(Attributor &A, const AbstractAttribute &QueryingAA,
9718                       const IRPosition &IRP, Type &Ty) {
9719   if (!Ty.isIntegerTy())
9720     return nullptr;
9721 
9722   // This will also pass the call base context.
9723   const auto &AA = A.getAAFor<AAType>(QueryingAA, IRP, DepClassTy::NONE);
9724 
9725   Optional<Constant *> COpt = AA.getAssumedConstant(A);
9726 
9727   if (!COpt.hasValue()) {
9728     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9729     return llvm::None;
9730   }
9731   if (auto *C = COpt.getValue()) {
9732     A.recordDependence(AA, QueryingAA, DepClassTy::OPTIONAL);
9733     return C;
9734   }
9735   return nullptr;
9736 }
9737 
9738 Value *AAPotentialValues::getSingleValue(
9739     Attributor &A, const AbstractAttribute &AA, const IRPosition &IRP,
9740     SmallVectorImpl<AA::ValueAndContext> &Values) {
9741   Type &Ty = *IRP.getAssociatedType();
9742   Optional<Value *> V;
9743   for (auto &It : Values) {
9744     V = AA::combineOptionalValuesInAAValueLatice(V, It.getValue(), &Ty);
9745     if (V.hasValue() && !V.getValue())
9746       break;
9747   }
9748   if (!V.hasValue())
9749     return UndefValue::get(&Ty);
9750   return V.getValue();
9751 }
9752 
9753 namespace {
9754 struct AAPotentialValuesImpl : AAPotentialValues {
9755   using StateType = PotentialLLVMValuesState;
9756 
9757   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
9758       : AAPotentialValues(IRP, A) {}
9759 
9760   /// See AbstractAttribute::initialize(..).
9761   void initialize(Attributor &A) override {
9762     if (A.hasSimplificationCallback(getIRPosition())) {
9763       indicatePessimisticFixpoint();
9764       return;
9765     }
9766     Value *Stripped = getAssociatedValue().stripPointerCasts();
9767     if (isa<Constant>(Stripped)) {
9768       addValue(A, getState(), *Stripped, getCtxI(), AA::AnyScope,
9769                getAnchorScope());
9770       indicateOptimisticFixpoint();
9771       return;
9772     }
9773     AAPotentialValues::initialize(A);
9774   }
9775 
9776   /// See AbstractAttribute::getAsStr().
9777   const std::string getAsStr() const override {
9778     std::string Str;
9779     llvm::raw_string_ostream OS(Str);
9780     OS << getState();
9781     return OS.str();
9782   }
9783 
9784   template <typename AAType>
9785   static Optional<Value *> askOtherAA(Attributor &A,
9786                                       const AbstractAttribute &AA,
9787                                       const IRPosition &IRP, Type &Ty) {
9788     if (isa<Constant>(IRP.getAssociatedValue()))
9789       return &IRP.getAssociatedValue();
9790     Optional<Constant *> C = askForAssumedConstant<AAType>(A, AA, IRP, Ty);
9791     if (!C)
9792       return llvm::None;
9793     if (C.getValue())
9794       if (auto *CC = AA::getWithType(**C, Ty))
9795         return CC;
9796     return nullptr;
9797   }
9798 
9799   void addValue(Attributor &A, StateType &State, Value &V,
9800                 const Instruction *CtxI, AA::ValueScope S,
9801                 Function *AnchorScope) const {
9802 
9803     IRPosition ValIRP = IRPosition::value(V);
9804     if (auto *CB = dyn_cast_or_null<CallBase>(CtxI)) {
9805       for (auto &U : CB->args()) {
9806         if (U.get() != &V)
9807           continue;
9808         ValIRP = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
9809         break;
9810       }
9811     }
9812 
9813     Value *VPtr = &V;
9814     if (ValIRP.getAssociatedType()->isIntegerTy()) {
9815       Type &Ty = *getAssociatedType();
9816       Optional<Value *> SimpleV =
9817           askOtherAA<AAValueConstantRange>(A, *this, ValIRP, Ty);
9818       if (SimpleV.hasValue() && !SimpleV.getValue()) {
9819         auto &PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
9820             *this, ValIRP, DepClassTy::OPTIONAL);
9821         if (PotentialConstantsAA.isValidState()) {
9822           for (auto &It : PotentialConstantsAA.getAssumedSet()) {
9823             State.unionAssumed({{*ConstantInt::get(&Ty, It), nullptr}, S});
9824           }
9825           assert(!PotentialConstantsAA.undefIsContained() &&
9826                  "Undef should be an explicit value!");
9827           return;
9828         }
9829       }
9830       if (!SimpleV.hasValue())
9831         return;
9832 
9833       if (SimpleV.getValue())
9834         VPtr = SimpleV.getValue();
9835     }
9836 
9837     if (isa<ConstantInt>(VPtr))
9838       CtxI = nullptr;
9839     if (!AA::isValidInScope(*VPtr, AnchorScope))
9840       S = AA::ValueScope(S | AA::Interprocedural);
9841 
9842     State.unionAssumed({{*VPtr, CtxI}, S});
9843   }
9844 
9845   /// Helper struct to tie a value+context pair together with the scope for
9846   /// which this is the simplified version.
9847   struct ItemInfo {
9848     AA::ValueAndContext I;
9849     AA::ValueScope S;
9850   };
9851 
9852   bool recurseForValue(Attributor &A, const IRPosition &IRP, AA::ValueScope S) {
9853     SmallMapVector<AA::ValueAndContext, int, 8> ValueScopeMap;
9854     for (auto CS : {AA::Intraprocedural, AA::Interprocedural}) {
9855       if (!(CS & S))
9856         continue;
9857 
9858       bool UsedAssumedInformation = false;
9859       SmallVector<AA::ValueAndContext> Values;
9860       if (!A.getAssumedSimplifiedValues(IRP, this, Values, CS,
9861                                         UsedAssumedInformation))
9862         return false;
9863 
9864       for (auto &It : Values)
9865         ValueScopeMap[It] += CS;
9866     }
9867     for (auto &It : ValueScopeMap)
9868       addValue(A, getState(), *It.first.getValue(), It.first.getCtxI(),
9869                AA::ValueScope(It.second), getAnchorScope());
9870 
9871     return true;
9872   }
9873 
9874   void giveUpOnIntraprocedural(Attributor &A) {
9875     auto NewS = StateType::getBestState(getState());
9876     for (auto &It : getAssumedSet()) {
9877       if (It.second == AA::Intraprocedural)
9878         continue;
9879       addValue(A, NewS, *It.first.getValue(), It.first.getCtxI(),
9880                AA::Interprocedural, getAnchorScope());
9881     }
9882     assert(!undefIsContained() && "Undef should be an explicit value!");
9883     addValue(A, NewS, getAssociatedValue(), getCtxI(), AA::Intraprocedural,
9884              getAnchorScope());
9885     getState() = NewS;
9886   }
9887 
9888   /// See AbstractState::indicatePessimisticFixpoint(...).
9889   ChangeStatus indicatePessimisticFixpoint() override {
9890     getState() = StateType::getBestState(getState());
9891     getState().unionAssumed({{getAssociatedValue(), getCtxI()}, AA::AnyScope});
9892     AAPotentialValues::indicateOptimisticFixpoint();
9893     return ChangeStatus::CHANGED;
9894   }
9895 
9896   /// See AbstractAttribute::updateImpl(...).
9897   ChangeStatus updateImpl(Attributor &A) override {
9898     return indicatePessimisticFixpoint();
9899   }
9900 
9901   /// See AbstractAttribute::manifest(...).
9902   ChangeStatus manifest(Attributor &A) override {
9903     SmallVector<AA::ValueAndContext> Values;
9904     for (AA::ValueScope S : {AA::Interprocedural, AA::Intraprocedural}) {
9905       Values.clear();
9906       if (!getAssumedSimplifiedValues(A, Values, S))
9907         continue;
9908       Value &OldV = getAssociatedValue();
9909       if (isa<UndefValue>(OldV))
9910         continue;
9911       Value *NewV = getSingleValue(A, *this, getIRPosition(), Values);
9912       if (!NewV || NewV == &OldV)
9913         continue;
9914       if (getCtxI() &&
9915           !AA::isValidAtPosition({*NewV, *getCtxI()}, A.getInfoCache()))
9916         continue;
9917       if (A.changeAfterManifest(getIRPosition(), *NewV))
9918         return ChangeStatus::CHANGED;
9919     }
9920     return ChangeStatus::UNCHANGED;
9921   }
9922 
9923   bool getAssumedSimplifiedValues(Attributor &A,
9924                                   SmallVectorImpl<AA::ValueAndContext> &Values,
9925                                   AA::ValueScope S) const override {
9926     if (!isValidState())
9927       return false;
9928     for (auto &It : getAssumedSet())
9929       if (It.second & S)
9930         Values.push_back(It.first);
9931     assert(!undefIsContained() && "Undef should be an explicit value!");
9932     return true;
9933   }
9934 };
9935 
9936 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
9937   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
9938       : AAPotentialValuesImpl(IRP, A) {}
9939 
9940   /// See AbstractAttribute::updateImpl(...).
9941   ChangeStatus updateImpl(Attributor &A) override {
9942     auto AssumedBefore = getAssumed();
9943 
9944     genericValueTraversal(A);
9945 
9946     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
9947                                            : ChangeStatus::CHANGED;
9948   }
9949 
9950   /// Helper struct to remember which AAIsDead instances we actually used.
9951   struct LivenessInfo {
9952     const AAIsDead *LivenessAA = nullptr;
9953     bool AnyDead = false;
9954   };
9955 
9956   /// Check if \p Cmp is a comparison we can simplify.
9957   ///
9958   /// We handle multiple cases, one in which at least one operand is an
9959   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
9960   /// operand. Return true if successful, in that case Worklist will be updated.
9961   bool handleCmp(Attributor &A, CmpInst &Cmp, ItemInfo II,
9962                  SmallVectorImpl<ItemInfo> &Worklist) {
9963     Value *LHS = Cmp.getOperand(0);
9964     Value *RHS = Cmp.getOperand(1);
9965 
9966     // Simplify the operands first.
9967     bool UsedAssumedInformation = false;
9968     const auto &SimplifiedLHS = A.getAssumedSimplified(
9969         IRPosition::value(*LHS, getCallBaseContext()), *this,
9970         UsedAssumedInformation, AA::Intraprocedural);
9971     if (!SimplifiedLHS.hasValue())
9972       return true;
9973     if (!SimplifiedLHS.getValue())
9974       return false;
9975     LHS = *SimplifiedLHS;
9976 
9977     const auto &SimplifiedRHS = A.getAssumedSimplified(
9978         IRPosition::value(*RHS, getCallBaseContext()), *this,
9979         UsedAssumedInformation, AA::Intraprocedural);
9980     if (!SimplifiedRHS.hasValue())
9981       return true;
9982     if (!SimplifiedRHS.getValue())
9983       return false;
9984     RHS = *SimplifiedRHS;
9985 
9986     LLVMContext &Ctx = Cmp.getContext();
9987     // Handle the trivial case first in which we don't even need to think about
9988     // null or non-null.
9989     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
9990       Constant *NewV =
9991           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
9992       addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S,
9993                getAnchorScope());
9994       return true;
9995     }
9996 
9997     // From now on we only handle equalities (==, !=).
9998     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
9999     if (!ICmp || !ICmp->isEquality())
10000       return false;
10001 
10002     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
10003     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
10004     if (!LHSIsNull && !RHSIsNull)
10005       return false;
10006 
    // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
    // on the non-nullptr operand; if we may assume it is non-null, we can
    // conclude the result of the comparison.
10010     assert((LHSIsNull || RHSIsNull) &&
10011            "Expected nullptr versus non-nullptr comparison at this point");
10012 
    // The index of the operand we assume is non-null: if the LHS is the null
    // constant, that is the RHS (index 1), and vice versa.
    unsigned PtrIdx = LHSIsNull;
10015     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
10016         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
10017         DepClassTy::REQUIRED);
10018     if (!PtrNonNullAA.isAssumedNonNull())
10019       return false;
10020 
10021     // The new value depends on the predicate, true for != and false for ==.
10022     Constant *NewV = ConstantInt::get(Type::getInt1Ty(Ctx),
10023                                       ICmp->getPredicate() == CmpInst::ICMP_NE);
10024     addValue(A, getState(), *NewV, /* CtxI */ nullptr, II.S, getAnchorScope());
10025     return true;
10026   }
10027 
10028   bool handleSelectInst(Attributor &A, SelectInst &SI, ItemInfo II,
10029                         SmallVectorImpl<ItemInfo> &Worklist) {
10030     const Instruction *CtxI = II.I.getCtxI();
10031     bool UsedAssumedInformation = false;
10032 
10033     Optional<Constant *> C =
10034         A.getAssumedConstant(*SI.getCondition(), *this, UsedAssumedInformation);
10035     bool NoValueYet = !C.hasValue();
10036     if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
10037       return true;
10038     if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
10039       if (CI->isZero())
10040         Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10041       else
10042         Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10043     } else {
10044       // We could not simplify the condition, assume both values.
10045       Worklist.push_back({{*SI.getTrueValue(), CtxI}, II.S});
10046       Worklist.push_back({{*SI.getFalseValue(), CtxI}, II.S});
10047     }
10048     return true;
10049   }
10050 
10051   bool handleLoadInst(Attributor &A, LoadInst &LI, ItemInfo II,
10052                       SmallVectorImpl<ItemInfo> &Worklist) {
10053     SmallSetVector<Value *, 4> PotentialCopies;
10054     SmallSetVector<Instruction *, 4> PotentialValueOrigins;
10055     bool UsedAssumedInformation = false;
10056     if (!AA::getPotentiallyLoadedValues(A, LI, PotentialCopies,
10057                                         PotentialValueOrigins, *this,
10058                                         UsedAssumedInformation,
10059                                         /* OnlyExact */ true))
10060       return false;
10061 
10062     // Do not simplify loads that are only used in llvm.assume if we cannot also
10063     // remove all stores that may feed into the load. The reason is that the
10064     // assume is probably worth something as long as the stores are around.
10065     InformationCache &InfoCache = A.getInfoCache();
10066     if (InfoCache.isOnlyUsedByAssume(LI)) {
10067       if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
10068             if (!I)
10069               return true;
10070             if (auto *SI = dyn_cast<StoreInst>(I))
10071               return A.isAssumedDead(SI->getOperandUse(0), this,
10072                                      /* LivenessAA */ nullptr,
10073                                      UsedAssumedInformation,
10074                                      /* CheckBBLivenessOnly */ false);
10075             return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
10076                                    UsedAssumedInformation,
10077                                    /* CheckBBLivenessOnly */ false);
10078           }))
10079         return false;
10080     }
10081 
    // Values have to be dynamically unique or we lose the fact that a
10083     // single llvm::Value might represent two runtime values (e.g.,
10084     // stack locations in different recursive calls).
10085     const Instruction *CtxI = II.I.getCtxI();
10086     bool ScopeIsLocal = (II.S & AA::Intraprocedural);
10087     bool AllLocal = ScopeIsLocal;
10088     bool DynamicallyUnique = llvm::all_of(PotentialCopies, [&](Value *PC) {
10089       AllLocal &= AA::isValidInScope(*PC, getAnchorScope());
10090       return AA::isDynamicallyUnique(A, *this, *PC);
10091     });
10092     if (!DynamicallyUnique)
10093       return false;
10094 
10095     for (auto *PotentialCopy : PotentialCopies) {
10096       if (AllLocal) {
10097         Worklist.push_back({{*PotentialCopy, CtxI}, II.S});
10098       } else {
10099         Worklist.push_back({{*PotentialCopy, CtxI}, AA::Interprocedural});
10100       }
10101     }
10102     if (!AllLocal && ScopeIsLocal)
10103       addValue(A, getState(), LI, CtxI, AA::Intraprocedural, getAnchorScope());
10104     return true;
10105   }
10106 
10107   bool handlePHINode(
10108       Attributor &A, PHINode &PHI, ItemInfo II,
10109       SmallVectorImpl<ItemInfo> &Worklist,
10110       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10111     auto GetLivenessInfo = [&](const Function &F) -> LivenessInfo & {
10112       LivenessInfo &LI = LivenessAAs[&F];
10113       if (!LI.LivenessAA)
10114         LI.LivenessAA = &A.getAAFor<AAIsDead>(*this, IRPosition::function(F),
10115                                               DepClassTy::NONE);
10116       return LI;
10117     };
10118 
10119     LivenessInfo &LI = GetLivenessInfo(*PHI.getFunction());
10120     for (unsigned u = 0, e = PHI.getNumIncomingValues(); u < e; u++) {
10121       BasicBlock *IncomingBB = PHI.getIncomingBlock(u);
10122       if (LI.LivenessAA->isEdgeDead(IncomingBB, PHI.getParent())) {
10123         LI.AnyDead = true;
10124         continue;
10125       }
10126       Worklist.push_back(
10127           {{*PHI.getIncomingValue(u), IncomingBB->getTerminator()}, II.S});
10128     }
10129     return true;
10130   }
10131 
  /// Use the generic, non-optimistic InstSimplify functionality if we managed to
10133   /// simplify any operand of the instruction \p I. Return true if successful,
10134   /// in that case Worklist will be updated.
10135   bool handleGenericInst(Attributor &A, Instruction &I, ItemInfo II,
10136                          SmallVectorImpl<ItemInfo> &Worklist) {
10137     bool SomeSimplified = false;
10138     bool UsedAssumedInformation = false;
10139 
10140     SmallVector<Value *, 8> NewOps(I.getNumOperands());
10141     int Idx = 0;
10142     for (Value *Op : I.operands()) {
10143       const auto &SimplifiedOp = A.getAssumedSimplified(
10144           IRPosition::value(*Op, getCallBaseContext()), *this,
10145           UsedAssumedInformation, AA::Intraprocedural);
      // If we are not sure about any operand, we are not sure about the
      // entire instruction either; we'll wait.
10148       if (!SimplifiedOp.hasValue())
10149         return true;
10150 
10151       if (SimplifiedOp.getValue())
10152         NewOps[Idx] = SimplifiedOp.getValue();
10153       else
10154         NewOps[Idx] = Op;
10155 
10156       SomeSimplified |= (NewOps[Idx] != Op);
10157       ++Idx;
10158     }
10159 
10160     // We won't bother with the InstSimplify interface if we didn't simplify any
10161     // operand ourselves.
10162     if (!SomeSimplified)
10163       return false;
10164 
10165     InformationCache &InfoCache = A.getInfoCache();
10166     Function *F = I.getFunction();
10167     const auto *DT =
10168         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
10169     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
10170     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
10171     OptimizationRemarkEmitter *ORE = nullptr;
10172 
10173     const DataLayout &DL = I.getModule()->getDataLayout();
10174     SimplifyQuery Q(DL, TLI, DT, AC, &I);
10175     Value *NewV = simplifyInstructionWithOperands(&I, NewOps, Q, ORE);
10176     if (!NewV || NewV == &I)
10177       return false;
10178 
10179     LLVM_DEBUG(dbgs() << "Generic inst " << I << " assumed simplified to "
10180                       << *NewV << "\n");
10181     Worklist.push_back({{*NewV, II.I.getCtxI()}, II.S});
10182     return true;
10183   }
10184 
10185   bool simplifyInstruction(
10186       Attributor &A, Instruction &I, ItemInfo II,
10187       SmallVectorImpl<ItemInfo> &Worklist,
10188       SmallMapVector<const Function *, LivenessInfo, 4> &LivenessAAs) {
10189     if (auto *CI = dyn_cast<CmpInst>(&I))
10190       if (handleCmp(A, *CI, II, Worklist))
10191         return true;
10192 
10193     switch (I.getOpcode()) {
10194     case Instruction::Select:
10195       return handleSelectInst(A, cast<SelectInst>(I), II, Worklist);
10196     case Instruction::PHI:
10197       return handlePHINode(A, cast<PHINode>(I), II, Worklist, LivenessAAs);
10198     case Instruction::Load:
10199       return handleLoadInst(A, cast<LoadInst>(I), II, Worklist);
10200     default:
10201       return handleGenericInst(A, I, II, Worklist);
10202     };
10203     return false;
10204   }
10205 
10206   void genericValueTraversal(Attributor &A) {
10207     SmallMapVector<const Function *, LivenessInfo, 4> LivenessAAs;
10208 
10209     Value *InitialV = &getAssociatedValue();
10210     SmallSet<AA::ValueAndContext, 16> Visited;
10211     SmallVector<ItemInfo, 16> Worklist;
10212     Worklist.push_back({{*InitialV, getCtxI()}, AA::AnyScope});
10213 
10214     int Iteration = 0;
10215     do {
10216       ItemInfo II = Worklist.pop_back_val();
10217       Value *V = II.I.getValue();
10218       assert(V);
10219       const Instruction *CtxI = II.I.getCtxI();
10220       AA::ValueScope S = II.S;
10221 
      // Check if we should process the current value. To prevent endless
      // recursion, keep a record of the values we followed!
10224       if (!Visited.insert(II.I).second)
10225         continue;
10226 
10227       // Make sure we limit the compile time for complex expressions.
10228       if (Iteration++ >= MaxPotentialValuesIterations) {
10229         LLVM_DEBUG(dbgs() << "Generic value traversal reached iteration limit: "
10230                           << Iteration << "!\n");
10231         addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10232         continue;
10233       }
10234 
      // Explicitly look through calls with a "returned" argument attribute if
      // we do not have a pointer, as stripPointerCasts only works on pointers.
10237       Value *NewV = nullptr;
10238       if (V->getType()->isPointerTy()) {
10239         NewV = AA::getWithType(*V->stripPointerCasts(), *V->getType());
10240       } else {
10241         auto *CB = dyn_cast<CallBase>(V);
10242         if (CB && CB->getCalledFunction()) {
10243           for (Argument &Arg : CB->getCalledFunction()->args())
10244             if (Arg.hasReturnedAttr()) {
10245               NewV = CB->getArgOperand(Arg.getArgNo());
10246               break;
10247             }
10248         }
10249       }
10250       if (NewV && NewV != V) {
10251         Worklist.push_back({{*NewV, CtxI}, S});
10252         continue;
10253       }
10254 
10255       if (auto *I = dyn_cast<Instruction>(V)) {
10256         if (simplifyInstruction(A, *I, II, Worklist, LivenessAAs))
10257           continue;
10258       }
10259 
10260       if (V != InitialV || isa<Argument>(V))
10261         if (recurseForValue(A, IRPosition::value(*V), II.S))
10262           continue;
10263 
      // If we haven't stripped anything, we give up.
10265       if (V == InitialV && CtxI == getCtxI()) {
10266         indicatePessimisticFixpoint();
10267         return;
10268       }
10269 
10270       addValue(A, getState(), *V, CtxI, S, getAnchorScope());
10271     } while (!Worklist.empty());
10272 
    // If we actually used liveness information, we have to record a
    // dependence.
10275     for (auto &It : LivenessAAs)
10276       if (It.second.AnyDead)
10277         A.recordDependence(*It.second.LivenessAA, *this, DepClassTy::OPTIONAL);
10278   }
10279 
10280   /// See AbstractAttribute::trackStatistics()
10281   void trackStatistics() const override {
10282     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
10283   }
10284 };
10285 
10286 struct AAPotentialValuesArgument final : AAPotentialValuesImpl {
10287   using Base = AAPotentialValuesImpl;
10288   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
10289       : Base(IRP, A) {}
10290 
10291   /// See AbstractAttribute::initialize(..).
10292   void initialize(Attributor &A) override {
10293     auto &Arg = cast<Argument>(getAssociatedValue());
10294     if (Arg.hasPointeeInMemoryValueAttr())
10295       indicatePessimisticFixpoint();
10296   }
10297 
10298   /// See AbstractAttribute::updateImpl(...).
10299   ChangeStatus updateImpl(Attributor &A) override {
10300     auto AssumedBefore = getAssumed();
10301 
10302     unsigned CSArgNo = getCallSiteArgNo();
10303 
10304     bool UsedAssumedInformation = false;
10305     SmallVector<AA::ValueAndContext> Values;
10306     auto CallSitePred = [&](AbstractCallSite ACS) {
10307       const auto CSArgIRP = IRPosition::callsite_argument(ACS, CSArgNo);
10308       if (CSArgIRP.getPositionKind() == IRP_INVALID)
10309         return false;
10310 
10311       if (!A.getAssumedSimplifiedValues(CSArgIRP, this, Values,
10312                                         AA::Interprocedural,
10313                                         UsedAssumedInformation))
10314         return false;
10315 
10316       return isValidState();
10317     };
10318 
10319     if (!A.checkForAllCallSites(CallSitePred, *this,
10320                                 /* RequireAllCallSites */ true,
10321                                 UsedAssumedInformation))
10322       return indicatePessimisticFixpoint();
10323 
10324     Function *Fn = getAssociatedFunction();
10325     bool AnyNonLocal = false;
10326     for (auto &It : Values) {
10327       if (isa<Constant>(It.getValue())) {
10328         addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10329                  getAnchorScope());
10330         continue;
10331       }
10332       if (!AA::isDynamicallyUnique(A, *this, *It.getValue()))
10333         return indicatePessimisticFixpoint();
10334 
10335       if (auto *Arg = dyn_cast<Argument>(It.getValue()))
10336         if (Arg->getParent() == Fn) {
10337           addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::AnyScope,
10338                    getAnchorScope());
10339           continue;
10340         }
10341       addValue(A, getState(), *It.getValue(), It.getCtxI(), AA::Interprocedural,
10342                getAnchorScope());
10343       AnyNonLocal = true;
10344     }
10345     if (undefIsContained())
10346       unionAssumedWithUndef();
10347     if (AnyNonLocal)
10348       giveUpOnIntraprocedural(A);
10349 
10350     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10351                                            : ChangeStatus::CHANGED;
10352   }
10353 
10354   /// See AbstractAttribute::trackStatistics()
10355   void trackStatistics() const override {
10356     STATS_DECLTRACK_ARG_ATTR(potential_values)
10357   }
10358 };
10359 
10360 struct AAPotentialValuesReturned
10361     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
10362   using Base =
10363       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
10364   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
10365       : Base(IRP, A) {}
10366 
10367   /// See AbstractAttribute::initialize(..).
10368   void initialize(Attributor &A) override {
10369     if (A.hasSimplificationCallback(getIRPosition()))
10370       indicatePessimisticFixpoint();
10371     else
10372       AAPotentialValues::initialize(A);
10373   }
10374 
10375   ChangeStatus manifest(Attributor &A) override {
    // We queried AAValueSimplify for the returned values, so they will be
    // replaced if a simplified form was found. Nothing to do here.
10378     return ChangeStatus::UNCHANGED;
10379   }
10380 
10381   ChangeStatus indicatePessimisticFixpoint() override {
10382     return AAPotentialValues::indicatePessimisticFixpoint();
10383   }
10384 
10385   /// See AbstractAttribute::trackStatistics()
10386   void trackStatistics() const override {
10387     STATS_DECLTRACK_FNRET_ATTR(potential_values)
10388   }
10389 };
10390 
10391 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
10392   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
10393       : AAPotentialValuesImpl(IRP, A) {}
10394 
10395   /// See AbstractAttribute::updateImpl(...).
10396   ChangeStatus updateImpl(Attributor &A) override {
10397     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
10398                      "not be called");
10399   }
10400 
10401   /// See AbstractAttribute::trackStatistics()
10402   void trackStatistics() const override {
10403     STATS_DECLTRACK_FN_ATTR(potential_values)
10404   }
10405 };
10406 
10407 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
10408   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
10409       : AAPotentialValuesFunction(IRP, A) {}
10410 
10411   /// See AbstractAttribute::trackStatistics()
10412   void trackStatistics() const override {
10413     STATS_DECLTRACK_CS_ATTR(potential_values)
10414   }
10415 };
10416 
10417 struct AAPotentialValuesCallSiteReturned : AAPotentialValuesImpl {
10418   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
10419       : AAPotentialValuesImpl(IRP, A) {}
10420 
10421   /// See AbstractAttribute::updateImpl(...).
10422   ChangeStatus updateImpl(Attributor &A) override {
10423     auto AssumedBefore = getAssumed();
10424 
10425     Function *Callee = getAssociatedFunction();
10426     if (!Callee)
10427       return indicatePessimisticFixpoint();
10428 
10429     bool UsedAssumedInformation = false;
10430     auto *CB = cast<CallBase>(getCtxI());
10431     if (CB->isMustTailCall() &&
10432         !A.isAssumedDead(IRPosition::inst(*CB), this, nullptr,
10433                          UsedAssumedInformation))
10434       return indicatePessimisticFixpoint();
10435 
10436     SmallVector<AA::ValueAndContext> Values;
10437     if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10438                                       Values, AA::Intraprocedural,
10439                                       UsedAssumedInformation))
10440       return indicatePessimisticFixpoint();
10441 
10442     Function *Caller = CB->getCaller();
10443 
10444     bool AnyNonLocal = false;
10445     for (auto &It : Values) {
10446       Value *V = It.getValue();
10447       Optional<Value *> CallerV = A.translateArgumentToCallSiteContent(
10448           V, *CB, *this, UsedAssumedInformation);
10449       if (!CallerV.hasValue()) {
10450         // Nothing to do as long as no value was determined.
10451         continue;
10452       }
10453       V = CallerV.getValue() ? CallerV.getValue() : V;
10454       if (AA::isDynamicallyUnique(A, *this, *V) &&
10455           AA::isValidInScope(*V, Caller)) {
10456         if (CallerV.getValue()) {
10457           SmallVector<AA::ValueAndContext> ArgValues;
10458           IRPosition IRP = IRPosition::value(*V);
10459           if (auto *Arg = dyn_cast<Argument>(V))
10460             if (Arg->getParent() == CB->getCalledFunction())
10461               IRP = IRPosition::callsite_argument(*CB, Arg->getArgNo());
10462           if (recurseForValue(A, IRP, AA::AnyScope))
10463             continue;
10464         }
10465         addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10466       } else {
10467         AnyNonLocal = true;
10468         break;
10469       }
10470     }
10471     if (AnyNonLocal) {
10472       Values.clear();
10473       if (!A.getAssumedSimplifiedValues(IRPosition::returned(*Callee), this,
10474                                         Values, AA::Interprocedural,
10475                                         UsedAssumedInformation))
10476         return indicatePessimisticFixpoint();
10477       AnyNonLocal = false;
10478       getState() = PotentialLLVMValuesState::getBestState();
10479       for (auto &It : Values) {
10480         Value *V = It.getValue();
10481         if (!AA::isDynamicallyUnique(A, *this, *V))
10482           return indicatePessimisticFixpoint();
10483         if (AA::isValidInScope(*V, Caller)) {
10484           addValue(A, getState(), *V, CB, AA::AnyScope, getAnchorScope());
10485         } else {
10486           AnyNonLocal = true;
10487           addValue(A, getState(), *V, CB, AA::Interprocedural,
10488                    getAnchorScope());
10489         }
10490       }
10491       if (AnyNonLocal)
10492         giveUpOnIntraprocedural(A);
10493     }
10494     return (AssumedBefore == getAssumed()) ? ChangeStatus::UNCHANGED
10495                                            : ChangeStatus::CHANGED;
10496   }
10497 
10498   ChangeStatus indicatePessimisticFixpoint() override {
10499     return AAPotentialValues::indicatePessimisticFixpoint();
10500   }
10501 
10502   /// See AbstractAttribute::trackStatistics()
10503   void trackStatistics() const override {
10504     STATS_DECLTRACK_CSRET_ATTR(potential_values)
10505   }
10506 };
10507 
10508 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
10509   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
10510       : AAPotentialValuesFloating(IRP, A) {}
10511 
10512   /// See AbstractAttribute::trackStatistics()
10513   void trackStatistics() const override {
10514     STATS_DECLTRACK_CSARG_ATTR(potential_values)
10515   }
10516 };
10517 } // namespace
10518 
10519 /// ---------------------- Assumption Propagation ------------------------------
10520 namespace {
10521 struct AAAssumptionInfoImpl : public AAAssumptionInfo {
10522   AAAssumptionInfoImpl(const IRPosition &IRP, Attributor &A,
10523                        const DenseSet<StringRef> &Known)
10524       : AAAssumptionInfo(IRP, A, Known) {}
10525 
10526   bool hasAssumption(const StringRef Assumption) const override {
10527     return isValidState() && setContains(Assumption);
10528   }
10529 
10530   /// See AbstractAttribute::getAsStr()
10531   const std::string getAsStr() const override {
10532     const SetContents &Known = getKnown();
10533     const SetContents &Assumed = getAssumed();
10534 
10535     const std::string KnownStr =
10536         llvm::join(Known.getSet().begin(), Known.getSet().end(), ",");
10537     const std::string AssumedStr =
10538         (Assumed.isUniversal())
10539             ? "Universal"
10540             : llvm::join(Assumed.getSet().begin(), Assumed.getSet().end(), ",");
10541 
10542     return "Known [" + KnownStr + "]," + " Assumed [" + AssumedStr + "]";
10543   }
10544 };
10545 
10546 /// Propagates assumption information from parent functions to all of their
10547 /// successors. An assumption can be propagated if the containing function
10548 /// dominates the called function.
10549 ///
10550 /// We start with a "known" set of assumptions already valid for the associated
10551 /// function and an "assumed" set that initially contains all possible
10552 /// assumptions. The assumed set is inter-procedurally updated by narrowing its
10553 /// contents as concrete values are known. The concrete values are seeded by the
/// first nodes that are either entries into the call graph, or contain no
10555 /// assumptions. Each node is updated as the intersection of the assumed state
10556 /// with all of its predecessors.
10557 struct AAAssumptionInfoFunction final : AAAssumptionInfoImpl {
10558   AAAssumptionInfoFunction(const IRPosition &IRP, Attributor &A)
10559       : AAAssumptionInfoImpl(IRP, A,
10560                              getAssumptions(*IRP.getAssociatedFunction())) {}
10561 
10562   /// See AbstractAttribute::manifest(...).
10563   ChangeStatus manifest(Attributor &A) override {
10564     const auto &Assumptions = getKnown();
10565 
10566     // Don't manifest a universal set if it somehow made it here.
10567     if (Assumptions.isUniversal())
10568       return ChangeStatus::UNCHANGED;
10569 
10570     Function *AssociatedFunction = getAssociatedFunction();
10571 
10572     bool Changed = addAssumptions(*AssociatedFunction, Assumptions.getSet());
10573 
10574     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10575   }
10576 
10577   /// See AbstractAttribute::updateImpl(...).
10578   ChangeStatus updateImpl(Attributor &A) override {
10579     bool Changed = false;
10580 
10581     auto CallSitePred = [&](AbstractCallSite ACS) {
10582       const auto &AssumptionAA = A.getAAFor<AAAssumptionInfo>(
10583           *this, IRPosition::callsite_function(*ACS.getInstruction()),
10584           DepClassTy::REQUIRED);
10585       // Get the set of assumptions shared by all of this function's callers.
10586       Changed |= getIntersection(AssumptionAA.getAssumed());
10587       return !getAssumed().empty() || !getKnown().empty();
10588     };
10589 
10590     bool UsedAssumedInformation = false;
10591     // Get the intersection of all assumptions held by this node's predecessors.
    // If we don't know all the call sites, then this is either an entry into the
10593     // call graph or an empty node. This node is known to only contain its own
10594     // assumptions and can be propagated to its successors.
10595     if (!A.checkForAllCallSites(CallSitePred, *this, true,
10596                                 UsedAssumedInformation))
10597       return indicatePessimisticFixpoint();
10598 
10599     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10600   }
10601 
10602   void trackStatistics() const override {}
10603 };
10604 
10605 /// Assumption Info defined for call sites.
10606 struct AAAssumptionInfoCallSite final : AAAssumptionInfoImpl {
10607 
10608   AAAssumptionInfoCallSite(const IRPosition &IRP, Attributor &A)
10609       : AAAssumptionInfoImpl(IRP, A, getInitialAssumptions(IRP)) {}
10610 
10611   /// See AbstractAttribute::initialize(...).
10612   void initialize(Attributor &A) override {
10613     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10614     A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10615   }
10616 
10617   /// See AbstractAttribute::manifest(...).
10618   ChangeStatus manifest(Attributor &A) override {
10619     // Don't manifest a universal set if it somehow made it here.
10620     if (getKnown().isUniversal())
10621       return ChangeStatus::UNCHANGED;
10622 
10623     CallBase &AssociatedCall = cast<CallBase>(getAssociatedValue());
10624     bool Changed = addAssumptions(AssociatedCall, getAssumed().getSet());
10625 
10626     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10627   }
10628 
10629   /// See AbstractAttribute::updateImpl(...).
10630   ChangeStatus updateImpl(Attributor &A) override {
10631     const IRPosition &FnPos = IRPosition::function(*getAnchorScope());
10632     auto &AssumptionAA =
10633         A.getAAFor<AAAssumptionInfo>(*this, FnPos, DepClassTy::REQUIRED);
10634     bool Changed = getIntersection(AssumptionAA.getAssumed());
10635     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
10636   }
10637 
10638   /// See AbstractAttribute::trackStatistics()
10639   void trackStatistics() const override {}
10640 
10641 private:
  /// Helper to initialize the known set as all the assumptions this call and
  /// the callee contain.
10644   DenseSet<StringRef> getInitialAssumptions(const IRPosition &IRP) {
10645     const CallBase &CB = cast<CallBase>(IRP.getAssociatedValue());
10646     auto Assumptions = getAssumptions(CB);
10647     if (Function *F = IRP.getAssociatedFunction())
10648       set_union(Assumptions, getAssumptions(*F));
10651     return Assumptions;
10652   }
10653 };
10654 } // namespace
10655 
10656 AACallGraphNode *AACallEdgeIterator::operator*() const {
10657   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
10658       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
10659 }
10660 
10661 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
10662 
10663 const char AAReturnedValues::ID = 0;
10664 const char AANoUnwind::ID = 0;
10665 const char AANoSync::ID = 0;
10666 const char AANoFree::ID = 0;
10667 const char AANonNull::ID = 0;
10668 const char AANoRecurse::ID = 0;
10669 const char AAWillReturn::ID = 0;
10670 const char AAUndefinedBehavior::ID = 0;
10671 const char AANoAlias::ID = 0;
10672 const char AAReachability::ID = 0;
10673 const char AANoReturn::ID = 0;
10674 const char AAIsDead::ID = 0;
10675 const char AADereferenceable::ID = 0;
10676 const char AAAlign::ID = 0;
10677 const char AAInstanceInfo::ID = 0;
10678 const char AANoCapture::ID = 0;
10679 const char AAValueSimplify::ID = 0;
10680 const char AAHeapToStack::ID = 0;
10681 const char AAPrivatizablePtr::ID = 0;
10682 const char AAMemoryBehavior::ID = 0;
10683 const char AAMemoryLocation::ID = 0;
10684 const char AAValueConstantRange::ID = 0;
10685 const char AAPotentialConstantValues::ID = 0;
10686 const char AAPotentialValues::ID = 0;
10687 const char AANoUndef::ID = 0;
10688 const char AACallEdges::ID = 0;
10689 const char AAFunctionReachability::ID = 0;
10690 const char AAPointerInfo::ID = 0;
10691 const char AAAssumptionInfo::ID = 0;
10692 
10693 // Macro magic to create the static generator function for attributes that
10694 // follow the naming scheme.
10695 
10696 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
10697   case IRPosition::PK:                                                         \
10698     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
10699 
10700 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
10701   case IRPosition::PK:                                                         \
10702     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
10703     ++NumAAs;                                                                  \
10704     break;
10705 
10706 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
10707   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
10708     CLASS *AA = nullptr;                                                       \
10709     switch (IRP.getPositionKind()) {                                           \
10710       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
10711       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
10712       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
10713       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
10714       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
10715       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
10716       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
10717       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
10718     }                                                                          \
10719     return *AA;                                                                \
10720   }
10721 
#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "an invalid")                          \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "a function")                         \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "a call site")                       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

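// Generator for abstract attributes that are meaningful on every valid
// position kind.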
#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "an invalid")                          \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

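// Generator for abstract attributes that are only meaningful on the function
// position itself, not even on call sites.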
#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "an invalid")                          \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "an argument")                        \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "a floating")                            \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "a returned")                         \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "a call site returned")     \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "a call site argument")     \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "a call site")                       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

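// Generator for abstract attributes that are meaningful on all position kinds
// except returned values.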
#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "an invalid")                          \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "a returned")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

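// Instantiate the createForPosition generators for all abstract attribute
// interfaces.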
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAssumptionInfo)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAInstanceInfo)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialConstantValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

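// For example (hypothetical caller code, not part of this file), a deduction
// that needs the nounwind property of a function F would query:
//
//   const auto &NoUnwindAA =
//       A.getOrCreateAAFor<AANoUnwind>(IRPosition::function(F));
//   if (NoUnwindAA.isAssumedNoUnwind())
//     ...;
//
// which reaches AANoUnwind::createForPosition above if no AANoUnwind exists
// for that position yet.
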
#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV