1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LazyValueInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/NoFolder.h"
37 #include "llvm/Support/Alignment.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/FileSystem.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "attributor"
50 
51 static cl::opt<bool> ManifestInternal(
52     "attributor-manifest-internal", cl::Hidden,
53     cl::desc("Manifest Attributor internal string attributes."),
54     cl::init(false));
55 
56 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
57                                        cl::Hidden);
58 
59 template <>
60 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
61 
62 static cl::opt<unsigned, true> MaxPotentialValues(
63     "attributor-max-potential-values", cl::Hidden,
64     cl::desc("Maximum number of potential values to be "
65              "tracked for each position."),
66     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
67     cl::init(7));
68 
69 STATISTIC(NumAAs, "Number of abstract attributes created");
70 
71 // Some helper macros to deal with statistics tracking.
72 //
73 // Usage:
74 // For simple IR attribute tracking overload trackStatistics in the abstract
75 // attribute and choose the right STATS_DECLTRACK_********* macro,
76 // e.g.,:
77 //  void trackStatistics() const override {
78 //    STATS_DECLTRACK_ARG_ATTR(returned)
79 //  }
80 // If there is a single "increment" side one can use the macro
81 // STATS_DECLTRACK with a custom message. If there are multiple increment
82 // sides, STATS_DECL and STATS_TRACK can also be used separately.
83 //
84 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
85   ("Number of " #TYPE " marked '" #NAME "'")
86 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
87 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
88 #define STATS_DECL(NAME, TYPE, MSG)                                            \
89   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
90 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
91 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
92   {                                                                            \
93     STATS_DECL(NAME, TYPE, MSG)                                                \
94     STATS_TRACK(NAME, TYPE)                                                    \
95   }
96 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
97   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
98 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSArguments,                                           \
100                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
101 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
103 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
104   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
105 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
107                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
108 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
109   STATS_DECLTRACK(NAME, CSReturn,                                              \
110                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
111 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
112   STATS_DECLTRACK(NAME, Floating,                                              \
113                   ("Number of floating values known to be '" #NAME "'"))
114 
115 // Specialization of the operator<< for abstract attributes subclasses. This
116 // disambiguates situations where multiple operators are applicable.
117 namespace llvm {
118 #define PIPE_OPERATOR(CLASS)                                                   \
119   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
120     return OS << static_cast<const AbstractAttribute &>(AA);                   \
121   }
122 
123 PIPE_OPERATOR(AAIsDead)
124 PIPE_OPERATOR(AANoUnwind)
125 PIPE_OPERATOR(AANoSync)
126 PIPE_OPERATOR(AANoRecurse)
127 PIPE_OPERATOR(AAWillReturn)
128 PIPE_OPERATOR(AANoReturn)
129 PIPE_OPERATOR(AAReturnedValues)
130 PIPE_OPERATOR(AANonNull)
131 PIPE_OPERATOR(AANoAlias)
132 PIPE_OPERATOR(AADereferenceable)
133 PIPE_OPERATOR(AAAlign)
134 PIPE_OPERATOR(AANoCapture)
135 PIPE_OPERATOR(AAValueSimplify)
136 PIPE_OPERATOR(AANoFree)
137 PIPE_OPERATOR(AAHeapToStack)
138 PIPE_OPERATOR(AAReachability)
139 PIPE_OPERATOR(AAMemoryBehavior)
140 PIPE_OPERATOR(AAMemoryLocation)
141 PIPE_OPERATOR(AAValueConstantRange)
142 PIPE_OPERATOR(AAPrivatizablePtr)
143 PIPE_OPERATOR(AAUndefinedBehavior)
144 PIPE_OPERATOR(AAPotentialValues)
145 PIPE_OPERATOR(AANoUndef)
146 PIPE_OPERATOR(AACallEdges)
147 PIPE_OPERATOR(AAFunctionReachability)
148 PIPE_OPERATOR(AAPointerInfo)
149 
150 #undef PIPE_OPERATOR
151 
152 template <>
153 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
154                                                      const DerefState &R) {
155   ChangeStatus CS0 =
156       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
157   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
158   return CS0 | CS1;
159 }
160 
161 } // namespace llvm
162 
163 /// Get pointer operand of memory accessing instruction. If \p I is
164 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
165 /// is set to false and the instruction is volatile, return nullptr.
166 static const Value *getPointerOperand(const Instruction *I,
167                                       bool AllowVolatile) {
168   if (!AllowVolatile && I->isVolatile())
169     return nullptr;
170 
171   if (auto *LI = dyn_cast<LoadInst>(I)) {
172     return LI->getPointerOperand();
173   }
174 
175   if (auto *SI = dyn_cast<StoreInst>(I)) {
176     return SI->getPointerOperand();
177   }
178 
179   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
180     return CXI->getPointerOperand();
181   }
182 
183   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
184     return RMWI->getPointerOperand();
185   }
186 
187   return nullptr;
188 }
189 
190 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
191 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
192 /// getelement pointer instructions that traverse the natural type of \p Ptr if
193 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
194 /// through a cast to i8*.
195 ///
196 /// TODO: This could probably live somewhere more prominantly if it doesn't
197 ///       already exist.
198 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
199                                int64_t Offset, IRBuilder<NoFolder> &IRB,
200                                const DataLayout &DL) {
201   assert(Offset >= 0 && "Negative offset not supported yet!");
202   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
203                     << "-bytes as " << *ResTy << "\n");
204 
205   if (Offset) {
206     SmallVector<Value *, 4> Indices;
207     std::string GEPName = Ptr->getName().str() + ".0";
208 
209     // Add 0 index to look through the pointer.
210     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
211            "Offset out of bounds");
212     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
213 
214     Type *Ty = PtrElemTy;
215     do {
216       auto *STy = dyn_cast<StructType>(Ty);
217       if (!STy)
218         // Non-aggregate type, we cast and make byte-wise progress now.
219         break;
220 
221       const StructLayout *SL = DL.getStructLayout(STy);
222       if (int64_t(SL->getSizeInBytes()) < Offset)
223         break;
224 
225       uint64_t Idx = SL->getElementContainingOffset(Offset);
226       assert(Idx < STy->getNumElements() && "Offset calculation error!");
227       uint64_t Rem = Offset - SL->getElementOffset(Idx);
228       Ty = STy->getElementType(Idx);
229 
230       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
231                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
232 
233       GEPName += "." + std::to_string(Idx);
234       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
235       Offset = Rem;
236     } while (Offset);
237 
238     // Create a GEP for the indices collected above.
239     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
240 
241     // If an offset is left we use byte-wise adjustment.
242     if (Offset) {
243       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
244       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
245                           GEPName + ".b" + Twine(Offset));
246     }
247   }
248 
249   // Ensure the result has the requested type.
250   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 template <typename StateTy>
266 static bool genericValueTraversal(
267     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
268     StateTy &State,
269     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
270         VisitValueCB,
271     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
272     function_ref<Value *(Value *)> StripCB = nullptr) {
273 
274   const AAIsDead *LivenessAA = nullptr;
275   if (IRP.getAnchorScope())
276     LivenessAA = &A.getAAFor<AAIsDead>(
277         QueryingAA,
278         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
279         DepClassTy::NONE);
280   bool AnyDead = false;
281 
282   Value *InitialV = &IRP.getAssociatedValue();
283   using Item = std::pair<Value *, const Instruction *>;
284   SmallSet<Item, 16> Visited;
285   SmallVector<Item, 16> Worklist;
286   Worklist.push_back({InitialV, CtxI});
287 
288   int Iteration = 0;
289   do {
290     Item I = Worklist.pop_back_val();
291     Value *V = I.first;
292     CtxI = I.second;
293     if (StripCB)
294       V = StripCB(V);
295 
296     // Check if we should process the current value. To prevent endless
297     // recursion keep a record of the values we followed!
298     if (!Visited.insert(I).second)
299       continue;
300 
301     // Make sure we limit the compile time for complex expressions.
302     if (Iteration++ >= MaxValues)
303       return false;
304 
305     // Explicitly look through calls with a "returned" attribute if we do
306     // not have a pointer as stripPointerCasts only works on them.
307     Value *NewV = nullptr;
308     if (V->getType()->isPointerTy()) {
309       NewV = V->stripPointerCasts();
310     } else {
311       auto *CB = dyn_cast<CallBase>(V);
312       if (CB && CB->getCalledFunction()) {
313         for (Argument &Arg : CB->getCalledFunction()->args())
314           if (Arg.hasReturnedAttr()) {
315             NewV = CB->getArgOperand(Arg.getArgNo());
316             break;
317           }
318       }
319     }
320     if (NewV && NewV != V) {
321       Worklist.push_back({NewV, CtxI});
322       continue;
323     }
324 
325     // Look through select instructions, visit assumed potential values.
326     if (auto *SI = dyn_cast<SelectInst>(V)) {
327       bool UsedAssumedInformation = false;
328       Optional<Constant *> C = A.getAssumedConstant(
329           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
330       bool NoValueYet = !C.hasValue();
331       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
332         continue;
333       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
334         if (CI->isZero())
335           Worklist.push_back({SI->getFalseValue(), CtxI});
336         else
337           Worklist.push_back({SI->getTrueValue(), CtxI});
338         continue;
339       }
340       // We could not simplify the condition, assume both values.(
341       Worklist.push_back({SI->getTrueValue(), CtxI});
342       Worklist.push_back({SI->getFalseValue(), CtxI});
343       continue;
344     }
345 
346     // Look through phi nodes, visit all live operands.
347     if (auto *PHI = dyn_cast<PHINode>(V)) {
348       assert(LivenessAA &&
349              "Expected liveness in the presence of instructions!");
350       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
351         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
352         bool UsedAssumedInformation = false;
353         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
354                             LivenessAA, UsedAssumedInformation,
355                             /* CheckBBLivenessOnly */ true)) {
356           AnyDead = true;
357           continue;
358         }
359         Worklist.push_back(
360             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
361       }
362       continue;
363     }
364 
365     if (UseValueSimplify && !isa<Constant>(V)) {
366       bool UsedAssumedInformation = false;
367       Optional<Value *> SimpleV =
368           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
369       if (!SimpleV.hasValue())
370         continue;
371       if (!SimpleV.getValue())
372         return false;
373       Value *NewV = SimpleV.getValue();
374       if (NewV != V) {
375         Worklist.push_back({NewV, CtxI});
376         continue;
377       }
378     }
379 
380     // Once a leaf is reached we inform the user through the callback.
381     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
382       return false;
383   } while (!Worklist.empty());
384 
385   // If we actually used liveness information so we have to record a dependence.
386   if (AnyDead)
387     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
388 
389   // All values have been visited.
390   return true;
391 }
392 
393 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
394                                      SmallVectorImpl<Value *> &Objects,
395                                      const AbstractAttribute &QueryingAA,
396                                      const Instruction *CtxI) {
397   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
398   SmallPtrSet<Value *, 8> SeenObjects;
399   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
400                                      SmallVectorImpl<Value *> &Objects,
401                                      bool) -> bool {
402     if (SeenObjects.insert(&Val).second)
403       Objects.push_back(&Val);
404     return true;
405   };
406   if (!genericValueTraversal<decltype(Objects)>(
407           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
408           true, 32, StripCB))
409     return false;
410   return true;
411 }
412 
413 const Value *stripAndAccumulateMinimalOffsets(
414     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
415     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
416     bool UseAssumed = false) {
417 
418   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
419     const IRPosition &Pos = IRPosition::value(V);
420     // Only track dependence if we are going to use the assumed info.
421     const AAValueConstantRange &ValueConstantRangeAA =
422         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
423                                          UseAssumed ? DepClassTy::OPTIONAL
424                                                     : DepClassTy::NONE);
425     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
426                                      : ValueConstantRangeAA.getKnown();
427     // We can only use the lower part of the range because the upper part can
428     // be higher than what the value can really be.
429     ROffset = Range.getSignedMin();
430     return true;
431   };
432 
433   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
434                                                 AttributorAnalysis);
435 }
436 
437 static const Value *getMinimalBaseOfAccsesPointerOperand(
438     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
439     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
440   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
441   if (!Ptr)
442     return nullptr;
443   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
444   const Value *Base = stripAndAccumulateMinimalOffsets(
445       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
446 
447   BytesOffset = OffsetAPInt.getSExtValue();
448   return Base;
449 }
450 
451 static const Value *
452 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
453                                      const DataLayout &DL,
454                                      bool AllowNonInbounds = false) {
455   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
456   if (!Ptr)
457     return nullptr;
458 
459   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
460                                           AllowNonInbounds);
461 }
462 
463 /// Clamp the information known for all returned values of a function
464 /// (identified by \p QueryingAA) into \p S.
465 template <typename AAType, typename StateType = typename AAType::StateType>
466 static void clampReturnedValueStates(
467     Attributor &A, const AAType &QueryingAA, StateType &S,
468     const IRPosition::CallBaseContext *CBContext = nullptr) {
469   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
470                     << QueryingAA << " into " << S << "\n");
471 
472   assert((QueryingAA.getIRPosition().getPositionKind() ==
473               IRPosition::IRP_RETURNED ||
474           QueryingAA.getIRPosition().getPositionKind() ==
475               IRPosition::IRP_CALL_SITE_RETURNED) &&
476          "Can only clamp returned value states for a function returned or call "
477          "site returned position!");
478 
479   // Use an optional state as there might not be any return values and we want
480   // to join (IntegerState::operator&) the state of all there are.
481   Optional<StateType> T;
482 
483   // Callback for each possibly returned value.
484   auto CheckReturnValue = [&](Value &RV) -> bool {
485     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
486     const AAType &AA =
487         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
488     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
489                       << " @ " << RVPos << "\n");
490     const StateType &AAS = AA.getState();
491     if (T.hasValue())
492       *T &= AAS;
493     else
494       T = AAS;
495     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
496                       << "\n");
497     return T->isValidState();
498   };
499 
500   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
501     S.indicatePessimisticFixpoint();
502   else if (T.hasValue())
503     S ^= *T;
504 }
505 
506 /// Helper class for generic deduction: return value -> returned position.
507 template <typename AAType, typename BaseType,
508           typename StateType = typename BaseType::StateType,
509           bool PropagateCallBaseContext = false>
510 struct AAReturnedFromReturnedValues : public BaseType {
511   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
512       : BaseType(IRP, A) {}
513 
514   /// See AbstractAttribute::updateImpl(...).
515   ChangeStatus updateImpl(Attributor &A) override {
516     StateType S(StateType::getBestState(this->getState()));
517     clampReturnedValueStates<AAType, StateType>(
518         A, *this, S,
519         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
520     // TODO: If we know we visited all returned values, thus no are assumed
521     // dead, we can take the known information from the state T.
522     return clampStateAndIndicateChange<StateType>(this->getState(), S);
523   }
524 };
525 
526 /// Clamp the information known at all call sites for a given argument
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
530                                         StateType &S) {
531   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
532                     << QueryingAA << " into " << S << "\n");
533 
534   assert(QueryingAA.getIRPosition().getPositionKind() ==
535              IRPosition::IRP_ARGUMENT &&
536          "Can only clamp call site argument states for an argument position!");
537 
538   // Use an optional state as there might not be any return values and we want
539   // to join (IntegerState::operator&) the state of all there are.
540   Optional<StateType> T;
541 
542   // The argument number which is also the call site argument number.
543   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
544 
545   auto CallSiteCheck = [&](AbstractCallSite ACS) {
546     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
547     // Check if a coresponding argument was found or if it is on not associated
548     // (which can happen for callback calls).
549     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
550       return false;
551 
552     const AAType &AA =
553         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
554     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
555                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
556     const StateType &AAS = AA.getState();
557     if (T.hasValue())
558       *T &= AAS;
559     else
560       T = AAS;
561     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
562                       << "\n");
563     return T->isValidState();
564   };
565 
566   bool AllCallSitesKnown;
567   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
568                               AllCallSitesKnown))
569     S.indicatePessimisticFixpoint();
570   else if (T.hasValue())
571     S ^= *T;
572 }
573 
574 /// This function is the bridge between argument position and the call base
575 /// context.
576 template <typename AAType, typename BaseType,
577           typename StateType = typename AAType::StateType>
578 bool getArgumentStateFromCallBaseContext(Attributor &A,
579                                          BaseType &QueryingAttribute,
580                                          IRPosition &Pos, StateType &State) {
581   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
582          "Expected an 'argument' position !");
583   const CallBase *CBContext = Pos.getCallBaseContext();
584   if (!CBContext)
585     return false;
586 
587   int ArgNo = Pos.getCallSiteArgNo();
588   assert(ArgNo >= 0 && "Invalid Arg No!");
589 
590   const auto &AA = A.getAAFor<AAType>(
591       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
592       DepClassTy::REQUIRED);
593   const StateType &CBArgumentState =
594       static_cast<const StateType &>(AA.getState());
595 
596   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
597                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
598                     << "\n");
599 
600   // NOTE: If we want to do call site grouping it should happen here.
601   State ^= CBArgumentState;
602   return true;
603 }
604 
605 /// Helper class for generic deduction: call site argument -> argument position.
606 template <typename AAType, typename BaseType,
607           typename StateType = typename AAType::StateType,
608           bool BridgeCallBaseContext = false>
609 struct AAArgumentFromCallSiteArguments : public BaseType {
610   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
611       : BaseType(IRP, A) {}
612 
613   /// See AbstractAttribute::updateImpl(...).
614   ChangeStatus updateImpl(Attributor &A) override {
615     StateType S = StateType::getBestState(this->getState());
616 
617     if (BridgeCallBaseContext) {
618       bool Success =
619           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
620               A, *this, this->getIRPosition(), S);
621       if (Success)
622         return clampStateAndIndicateChange<StateType>(this->getState(), S);
623     }
624     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
625 
626     // TODO: If we know we visited all incoming values, thus no are assumed
627     // dead, we can take the known information from the state T.
628     return clampStateAndIndicateChange<StateType>(this->getState(), S);
629   }
630 };
631 
632 /// Helper class for generic replication: function returned -> cs returned.
633 template <typename AAType, typename BaseType,
634           typename StateType = typename BaseType::StateType,
635           bool IntroduceCallBaseContext = false>
636 struct AACallSiteReturnedFromReturned : public BaseType {
637   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
638       : BaseType(IRP, A) {}
639 
640   /// See AbstractAttribute::updateImpl(...).
641   ChangeStatus updateImpl(Attributor &A) override {
642     assert(this->getIRPosition().getPositionKind() ==
643                IRPosition::IRP_CALL_SITE_RETURNED &&
644            "Can only wrap function returned positions for call site returned "
645            "positions!");
646     auto &S = this->getState();
647 
648     const Function *AssociatedFunction =
649         this->getIRPosition().getAssociatedFunction();
650     if (!AssociatedFunction)
651       return S.indicatePessimisticFixpoint();
652 
653     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
654     if (IntroduceCallBaseContext)
655       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
656                         << CBContext << "\n");
657 
658     IRPosition FnPos = IRPosition::returned(
659         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
660     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
661     return clampStateAndIndicateChange(S, AA.getState());
662   }
663 };
664 
665 /// Helper function to accumulate uses.
666 template <class AAType, typename StateType = typename AAType::StateType>
667 static void followUsesInContext(AAType &AA, Attributor &A,
668                                 MustBeExecutedContextExplorer &Explorer,
669                                 const Instruction *CtxI,
670                                 SetVector<const Use *> &Uses,
671                                 StateType &State) {
672   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
673   for (unsigned u = 0; u < Uses.size(); ++u) {
674     const Use *U = Uses[u];
675     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
676       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
677       if (Found && AA.followUseInMBEC(A, U, UserI, State))
678         for (const Use &Us : UserI->uses())
679           Uses.insert(&Us);
680     }
681   }
682 }
683 
684 /// Use the must-be-executed-context around \p I to add information into \p S.
685 /// The AAType class is required to have `followUseInMBEC` method with the
686 /// following signature and behaviour:
687 ///
688 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
689 /// U - Underlying use.
690 /// I - The user of the \p U.
691 /// Returns true if the value should be tracked transitively.
692 ///
693 template <class AAType, typename StateType = typename AAType::StateType>
694 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
695                              Instruction &CtxI) {
696 
697   // Container for (transitive) uses of the associated value.
698   SetVector<const Use *> Uses;
699   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
700     Uses.insert(&U);
701 
702   MustBeExecutedContextExplorer &Explorer =
703       A.getInfoCache().getMustBeExecutedContextExplorer();
704 
705   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
706 
707   if (S.isAtFixpoint())
708     return;
709 
710   SmallVector<const BranchInst *, 4> BrInsts;
711   auto Pred = [&](const Instruction *I) {
712     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
713       if (Br->isConditional())
714         BrInsts.push_back(Br);
715     return true;
716   };
717 
718   // Here, accumulate conditional branch instructions in the context. We
719   // explore the child paths and collect the known states. The disjunction of
720   // those states can be merged to its own state. Let ParentState_i be a state
721   // to indicate the known information for an i-th branch instruction in the
722   // context. ChildStates are created for its successors respectively.
723   //
724   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
725   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
726   //      ...
727   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
728   //
729   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
730   //
731   // FIXME: Currently, recursive branches are not handled. For example, we
732   // can't deduce that ptr must be dereferenced in below function.
733   //
734   // void f(int a, int c, int *ptr) {
735   //    if(a)
736   //      if (b) {
737   //        *ptr = 0;
738   //      } else {
739   //        *ptr = 1;
740   //      }
741   //    else {
742   //      if (b) {
743   //        *ptr = 0;
744   //      } else {
745   //        *ptr = 1;
746   //      }
747   //    }
748   // }
749 
750   Explorer.checkForAllContext(&CtxI, Pred);
751   for (const BranchInst *Br : BrInsts) {
752     StateType ParentState;
753 
754     // The known state of the parent state is a conjunction of children's
755     // known states so it is initialized with a best state.
756     ParentState.indicateOptimisticFixpoint();
757 
758     for (const BasicBlock *BB : Br->successors()) {
759       StateType ChildState;
760 
761       size_t BeforeSize = Uses.size();
762       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
763 
764       // Erase uses which only appear in the child.
765       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
766         It = Uses.erase(It);
767 
768       ParentState &= ChildState;
769     }
770 
771     // Use only known state.
772     S += ParentState;
773   }
774 }
775 
776 /// ------------------------ PointerInfo ---------------------------------------
777 
778 namespace llvm {
779 namespace AA {
780 namespace PointerInfo {
781 
782 /// An access kind description as used by AAPointerInfo.
783 struct OffsetAndSize;
784 
785 struct State;
786 
787 } // namespace PointerInfo
788 } // namespace AA
789 
790 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
791 template <>
792 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
793   using Access = AAPointerInfo::Access;
794   static inline Access getEmptyKey();
795   static inline Access getTombstoneKey();
796   static unsigned getHashValue(const Access &A);
797   static bool isEqual(const Access &LHS, const Access &RHS);
798 };
799 
800 /// Helper that allows OffsetAndSize as a key in a DenseMap.
801 template <>
802 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
803     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
804 
805 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
806 /// but the instruction
807 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
808   using Base = DenseMapInfo<Instruction *>;
809   using Access = AAPointerInfo::Access;
810   static inline Access getEmptyKey();
811   static inline Access getTombstoneKey();
812   static unsigned getHashValue(const Access &A);
813   static bool isEqual(const Access &LHS, const Access &RHS);
814 };
815 
816 } // namespace llvm
817 
818 /// Helper to represent an access offset and size, with logic to deal with
819 /// uncertainty and check for overlapping accesses.
820 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
821   using BaseTy = std::pair<int64_t, int64_t>;
822   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
823   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
824   int64_t getOffset() const { return first; }
825   int64_t getSize() const { return second; }
826   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
827 
828   /// Return true if this offset and size pair might describe an address that
829   /// overlaps with \p OAS.
830   bool mayOverlap(const OffsetAndSize &OAS) const {
831     // Any unknown value and we are giving up -> overlap.
832     if (OAS.getOffset() == OffsetAndSize::Unknown ||
833         OAS.getSize() == OffsetAndSize::Unknown ||
834         getOffset() == OffsetAndSize::Unknown ||
835         getSize() == OffsetAndSize::Unknown)
836       return true;
837 
838     // Check if one offset point is in the other interval [offset, offset+size].
839     return OAS.getOffset() + OAS.getSize() > getOffset() &&
840            OAS.getOffset() < getOffset() + getSize();
841   }
842 
843   /// Constant used to represent unknown offset or sizes.
844   static constexpr int64_t Unknown = 1 << 31;
845 };
846 
847 /// Implementation of the DenseMapInfo.
848 ///
849 ///{
850 inline llvm::AccessAsInstructionInfo::Access
851 llvm::AccessAsInstructionInfo::getEmptyKey() {
852   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
853 }
854 inline llvm::AccessAsInstructionInfo::Access
855 llvm::AccessAsInstructionInfo::getTombstoneKey() {
856   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
857                 nullptr);
858 }
859 unsigned llvm::AccessAsInstructionInfo::getHashValue(
860     const llvm::AccessAsInstructionInfo::Access &A) {
861   return Base::getHashValue(A.getRemoteInst());
862 }
863 bool llvm::AccessAsInstructionInfo::isEqual(
864     const llvm::AccessAsInstructionInfo::Access &LHS,
865     const llvm::AccessAsInstructionInfo::Access &RHS) {
866   return LHS.getRemoteInst() == RHS.getRemoteInst();
867 }
868 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
869 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
870   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
871                                nullptr);
872 }
873 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
874 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
875   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
876                                nullptr);
877 }
878 
879 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
880     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
881   return detail::combineHashValue(
882              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
883              (A.isWrittenValueYetUndetermined()
884                   ? ~0
885                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
886          A.getKind();
887 }
888 
889 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
890     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
891     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
892   return LHS == RHS;
893 }
894 ///}
895 
896 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
897 struct AA::PointerInfo::State : public AbstractState {
898 
899   /// Return the best possible representable state.
900   static State getBestState(const State &SIS) { return State(); }
901 
902   /// Return the worst possible representable state.
903   static State getWorstState(const State &SIS) {
904     State R;
905     R.indicatePessimisticFixpoint();
906     return R;
907   }
908 
909   State() {}
910   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
911   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
912 
913   const State &getAssumed() const { return *this; }
914 
915   /// See AbstractState::isValidState().
916   bool isValidState() const override { return BS.isValidState(); }
917 
918   /// See AbstractState::isAtFixpoint().
919   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
920 
921   /// See AbstractState::indicateOptimisticFixpoint().
922   ChangeStatus indicateOptimisticFixpoint() override {
923     BS.indicateOptimisticFixpoint();
924     return ChangeStatus::UNCHANGED;
925   }
926 
927   /// See AbstractState::indicatePessimisticFixpoint().
928   ChangeStatus indicatePessimisticFixpoint() override {
929     BS.indicatePessimisticFixpoint();
930     return ChangeStatus::CHANGED;
931   }
932 
933   State &operator=(const State &R) {
934     if (this == &R)
935       return *this;
936     BS = R.BS;
937     AccessBins = R.AccessBins;
938     return *this;
939   }
940 
941   State &operator=(State &&R) {
942     if (this == &R)
943       return *this;
944     std::swap(BS, R.BS);
945     std::swap(AccessBins, R.AccessBins);
946     return *this;
947   }
948 
949   bool operator==(const State &R) const {
950     if (BS != R.BS)
951       return false;
952     if (AccessBins.size() != R.AccessBins.size())
953       return false;
954     auto It = begin(), RIt = R.begin(), E = end();
955     while (It != E) {
956       if (It->getFirst() != RIt->getFirst())
957         return false;
958       auto &Accs = It->getSecond();
959       auto &RAccs = RIt->getSecond();
960       if (Accs.size() != RAccs.size())
961         return false;
962       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
963       while (AccIt != AccE) {
964         if (*AccIt != *RAccIt)
965           return false;
966         ++AccIt;
967         ++RAccIt;
968       }
969       ++It;
970       ++RIt;
971     }
972     return true;
973   }
974   bool operator!=(const State &R) const { return !(*this == R); }
975 
976   /// We store accesses in a set with the instruction as key.
977   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
978 
979   /// We store all accesses in bins denoted by their offset and size.
980   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
981 
982   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
983   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
984 
985 protected:
986   /// The bins with all the accesses for the associated pointer.
987   DenseMap<OffsetAndSize, Accesses> AccessBins;
988 
989   /// Add a new access to the state at offset \p Offset and with size \p Size.
990   /// The access is associated with \p I, writes \p Content (if anything), and
991   /// is of kind \p Kind.
992   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
993   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
994                          Optional<Value *> Content,
995                          AAPointerInfo::AccessKind Kind, Type *Ty,
996                          Instruction *RemoteI = nullptr,
997                          Accesses *BinPtr = nullptr) {
998     OffsetAndSize Key{Offset, Size};
999     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
1000     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1001     // Check if we have an access for this instruction in this bin, if not,
1002     // simply add it.
1003     auto It = Bin.find(Acc);
1004     if (It == Bin.end()) {
1005       Bin.insert(Acc);
1006       return ChangeStatus::CHANGED;
1007     }
1008     // If the existing access is the same as then new one, nothing changed.
1009     AAPointerInfo::Access Before = *It;
1010     // The new one will be combined with the existing one.
1011     *It &= Acc;
1012     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1013   }
1014 
1015   /// See AAPointerInfo::forallInterferingAccesses.
1016   bool forallInterferingAccesses(
1017       Instruction &I,
1018       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1019     if (!isValidState())
1020       return false;
1021     // First find the offset and size of I.
1022     OffsetAndSize OAS(-1, -1);
1023     for (auto &It : AccessBins) {
1024       for (auto &Access : It.getSecond()) {
1025         if (Access.getRemoteInst() == &I) {
1026           OAS = It.getFirst();
1027           break;
1028         }
1029       }
1030       if (OAS.getSize() != -1)
1031         break;
1032     }
1033     if (OAS.getSize() == -1)
1034       return true;
1035 
1036     // Now that we have an offset and size, find all overlapping ones and use
1037     // the callback on the accesses.
1038     for (auto &It : AccessBins) {
1039       OffsetAndSize ItOAS = It.getFirst();
1040       if (!OAS.mayOverlap(ItOAS))
1041         continue;
1042       for (auto &Access : It.getSecond())
1043         if (!CB(Access, OAS == ItOAS))
1044           return false;
1045     }
1046     return true;
1047   }
1048 
1049 private:
1050   /// State to track fixpoint and validity.
1051   BooleanState BS;
1052 };
1053 
1054 struct AAPointerInfoImpl
1055     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1056   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1057   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1058 
1059   /// See AbstractAttribute::initialize(...).
1060   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1061 
1062   /// See AbstractAttribute::getAsStr().
1063   const std::string getAsStr() const override {
1064     return std::string("PointerInfo ") +
1065            (isValidState() ? (std::string("#") +
1066                               std::to_string(AccessBins.size()) + " bins")
1067                            : "<invalid>");
1068   }
1069 
1070   /// See AbstractAttribute::manifest(...).
1071   ChangeStatus manifest(Attributor &A) override {
1072     return AAPointerInfo::manifest(A);
1073   }
1074 
1075   bool forallInterferingAccesses(
1076       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1077       const override {
1078     return State::forallInterferingAccesses(LI, CB);
1079   }
1080   bool forallInterferingAccesses(
1081       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1082       const override {
1083     return State::forallInterferingAccesses(SI, CB);
1084   }
1085 
1086   ChangeStatus translateAndAddCalleeState(Attributor &A,
1087                                           const AAPointerInfo &CalleeAA,
1088                                           int64_t CallArgOffset, CallBase &CB) {
1089     using namespace AA::PointerInfo;
1090     if (!CalleeAA.getState().isValidState() || !isValidState())
1091       return indicatePessimisticFixpoint();
1092 
1093     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1094     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1095 
1096     // Combine the accesses bin by bin.
1097     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1098     for (auto &It : CalleeImplAA.getState()) {
1099       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1100       if (CallArgOffset != OffsetAndSize::Unknown)
1101         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1102                             It.first.getSize());
1103       Accesses &Bin = AccessBins[OAS];
1104       for (const AAPointerInfo::Access &RAcc : It.second) {
1105         if (IsByval && !RAcc.isRead())
1106           continue;
1107         bool UsedAssumedInformation = false;
1108         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1109             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1110         AccessKind AK =
1111             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1112                                                  : AccessKind::AK_READ_WRITE));
1113         Changed =
1114             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1115                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1116       }
1117     }
1118     return Changed;
1119   }
1120 
1121   /// Statistic tracking for all AAPointerInfo implementations.
1122   /// See AbstractAttribute::trackStatistics().
1123   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1124 };
1125 
1126 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1127   using AccessKind = AAPointerInfo::AccessKind;
1128   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1129       : AAPointerInfoImpl(IRP, A) {}
1130 
1131   /// See AbstractAttribute::initialize(...).
1132   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1133 
1134   /// Deal with an access and signal if it was handled successfully.
1135   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1136                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1137                     ChangeStatus &Changed, Type *Ty,
1138                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1139     using namespace AA::PointerInfo;
1140     // No need to find a size if one is given or the offset is unknown.
1141     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1142         Ty) {
1143       const DataLayout &DL = A.getDataLayout();
1144       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1145       if (!AccessSize.isScalable())
1146         Size = AccessSize.getFixedSize();
1147     }
1148     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1149     return true;
1150   };
1151 
1152   /// Helper struct, will support ranges eventually.
1153   struct OffsetInfo {
1154     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1155 
1156     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1157   };
1158 
1159   /// See AbstractAttribute::updateImpl(...).
1160   ChangeStatus updateImpl(Attributor &A) override {
1161     using namespace AA::PointerInfo;
1162     State S = getState();
1163     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1164     Value &AssociatedValue = getAssociatedValue();
1165 
1166     const DataLayout &DL = A.getDataLayout();
1167     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1168     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1169 
1170     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1171                                      bool &Follow) {
1172       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1173       UsrOI = PtrOI;
1174       Follow = true;
1175       return true;
1176     };
1177 
1178     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1179       Value *CurPtr = U.get();
1180       User *Usr = U.getUser();
1181       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1182                         << *Usr << "\n");
1183 
1184       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1185 
1186       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1187         if (CE->isCast())
1188           return HandlePassthroughUser(Usr, PtrOI, Follow);
1189         if (CE->isCompare())
1190           return true;
1191         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1192           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1193                             << "\n");
1194           return false;
1195         }
1196       }
1197       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1198         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1199         UsrOI = PtrOI;
1200 
1201         // TODO: Use range information.
1202         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1203             !GEP->hasAllConstantIndices()) {
1204           UsrOI.Offset = OffsetAndSize::Unknown;
1205           Follow = true;
1206           return true;
1207         }
1208 
1209         SmallVector<Value *, 8> Indices;
1210         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1211           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1212             Indices.push_back(CIdx);
1213             continue;
1214           }
1215 
1216           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1217                             << " : " << *Idx << "\n");
1218           return false;
1219         }
1220         UsrOI.Offset = PtrOI.Offset +
1221                        DL.getIndexedOffsetInType(
1222                            CurPtr->getType()->getPointerElementType(), Indices);
1223         Follow = true;
1224         return true;
1225       }
1226       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1227         return HandlePassthroughUser(Usr, PtrOI, Follow);
1228 
1229       // For PHIs we need to take care of the recurrence explicitly as the value
1230       // might change while we iterate through a loop. For now, we give up if
1231       // the PHI is not invariant.
1232       if (isa<PHINode>(Usr)) {
1233         // Check if the PHI is invariant (so far).
1234         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1235         if (UsrOI == PtrOI)
1236           return true;
1237 
1238         // Check if the PHI operand has already an unknown offset as we can't
1239         // improve on that anymore.
1240         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1241           UsrOI = PtrOI;
1242           Follow = true;
1243           return true;
1244         }
1245 
1246         // Check if the PHI operand is not dependent on the PHI itself.
1247         APInt Offset(DL.getIndexTypeSizeInBits(AssociatedValue.getType()), 0);
1248         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1249                                     DL, Offset, /* AllowNonInbounds */ true)) {
1250           if (Offset != PtrOI.Offset) {
1251             LLVM_DEBUG(dbgs()
1252                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1253                        << *CurPtr << " in " << *Usr << "\n");
1254             return false;
1255           }
1256           return HandlePassthroughUser(Usr, PtrOI, Follow);
1257         }
1258 
1259         // TODO: Approximate in case we know the direction of the recurrence.
1260         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1261                           << *CurPtr << " in " << *Usr << "\n");
1262         UsrOI = PtrOI;
1263         UsrOI.Offset = OffsetAndSize::Unknown;
1264         Follow = true;
1265         return true;
1266       }
1267 
1268       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1269         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1270                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1271                             LoadI->getType());
1272       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1273         if (StoreI->getValueOperand() == CurPtr) {
1274           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1275                             << *StoreI << "\n");
1276           return false;
1277         }
1278         bool UsedAssumedInformation = false;
1279         Optional<Value *> Content = A.getAssumedSimplified(
1280             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1281         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1282                             PtrOI.Offset, Changed,
1283                             StoreI->getValueOperand()->getType());
1284       }
1285       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1286         if (CB->isLifetimeStartOrEnd())
1287           return true;
1288         if (CB->isArgOperand(&U)) {
1289           unsigned ArgNo = CB->getArgOperandNo(&U);
1290           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1291               *this, IRPosition::callsite_argument(*CB, ArgNo),
1292               DepClassTy::REQUIRED);
1293           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1294                     Changed;
1295           return true;
1296         }
1297         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1298                           << "\n");
1299         // TODO: Allow some call uses
1300         return false;
1301       }
1302 
1303       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1304       return false;
1305     };
1306     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1307                            /* CheckBBLivenessOnly */ true))
1308       return indicatePessimisticFixpoint();
1309 
1310     LLVM_DEBUG({
1311       dbgs() << "Accesses by bin after update:\n";
1312       for (auto &It : AccessBins) {
1313         dbgs() << "[" << It.first.getOffset() << "-"
1314                << It.first.getOffset() + It.first.getSize()
1315                << "] : " << It.getSecond().size() << "\n";
1316         for (auto &Acc : It.getSecond()) {
1317           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1318                  << "\n";
1319           if (Acc.getLocalInst() != Acc.getRemoteInst())
1320             dbgs() << "     -->                         "
1321                    << *Acc.getRemoteInst() << "\n";
1322           if (!Acc.isWrittenValueYetUndetermined())
1323             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1324         }
1325       }
1326     });
1327 
1328     return Changed;
1329   }
1330 
1331   /// See AbstractAttribute::trackStatistics()
1332   void trackStatistics() const override {
1333     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1334   }
1335 };
1336 
1337 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1338   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1339       : AAPointerInfoImpl(IRP, A) {}
1340 
1341   /// See AbstractAttribute::updateImpl(...).
1342   ChangeStatus updateImpl(Attributor &A) override {
1343     return indicatePessimisticFixpoint();
1344   }
1345 
1346   /// See AbstractAttribute::trackStatistics()
1347   void trackStatistics() const override {
1348     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1349   }
1350 };
1351 
1352 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1353   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1354       : AAPointerInfoFloating(IRP, A) {}
1355 
1356   /// See AbstractAttribute::initialize(...).
1357   void initialize(Attributor &A) override {
1358     AAPointerInfoFloating::initialize(A);
1359     if (getAnchorScope()->isDeclaration())
1360       indicatePessimisticFixpoint();
1361   }
1362 
1363   /// See AbstractAttribute::trackStatistics()
1364   void trackStatistics() const override {
1365     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1366   }
1367 };
1368 
1369 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1370   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1371       : AAPointerInfoFloating(IRP, A) {}
1372 
1373   /// See AbstractAttribute::updateImpl(...).
1374   ChangeStatus updateImpl(Attributor &A) override {
1375     using namespace AA::PointerInfo;
1376     // We handle memory intrinsics explicitly, at least the first (=
1377     // destination) and second (=source) arguments as we know how they are
1378     // accessed.
1379     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1380       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1381       int64_t LengthVal = OffsetAndSize::Unknown;
1382       if (Length)
1383         LengthVal = Length->getSExtValue();
1384       Value &Ptr = getAssociatedValue();
1385       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1386       ChangeStatus Changed;
1387       if (ArgNo == 0) {
1388         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1389                      nullptr, LengthVal);
1390       } else if (ArgNo == 1) {
1391         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1392                      nullptr, LengthVal);
1393       } else {
1394         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1395                           << *MI << "\n");
1396         return indicatePessimisticFixpoint();
1397       }
1398       return Changed;
1399     }
1400 
1401     // TODO: Once we have call site specific value information we can provide
1402     //       call site specific liveness information and then it makes
1403     //       sense to specialize attributes for call sites arguments instead of
1404     //       redirecting requests to the callee argument.
1405     Argument *Arg = getAssociatedArgument();
1406     if (!Arg)
1407       return indicatePessimisticFixpoint();
1408     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1409     auto &ArgAA =
1410         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1411     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1412   }
1413 
1414   /// See AbstractAttribute::trackStatistics()
1415   void trackStatistics() const override {
1416     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1417   }
1418 };
1419 
1420 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1421   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1422       : AAPointerInfoFloating(IRP, A) {}
1423 
1424   /// See AbstractAttribute::trackStatistics()
1425   void trackStatistics() const override {
1426     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1427   }
1428 };
1429 
1430 /// -----------------------NoUnwind Function Attribute--------------------------
1431 
1432 struct AANoUnwindImpl : AANoUnwind {
1433   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1434 
1435   const std::string getAsStr() const override {
1436     return getAssumed() ? "nounwind" : "may-unwind";
1437   }
1438 
1439   /// See AbstractAttribute::updateImpl(...).
1440   ChangeStatus updateImpl(Attributor &A) override {
1441     auto Opcodes = {
1442         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1443         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1444         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1445 
1446     auto CheckForNoUnwind = [&](Instruction &I) {
1447       if (!I.mayThrow())
1448         return true;
1449 
1450       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1451         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1452             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1453         return NoUnwindAA.isAssumedNoUnwind();
1454       }
1455       return false;
1456     };
1457 
1458     bool UsedAssumedInformation = false;
1459     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1460                                    UsedAssumedInformation))
1461       return indicatePessimisticFixpoint();
1462 
1463     return ChangeStatus::UNCHANGED;
1464   }
1465 };
1466 
1467 struct AANoUnwindFunction final : public AANoUnwindImpl {
1468   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1469       : AANoUnwindImpl(IRP, A) {}
1470 
1471   /// See AbstractAttribute::trackStatistics()
1472   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1473 };
1474 
1475 /// NoUnwind attribute deduction for a call sites.
1476 struct AANoUnwindCallSite final : AANoUnwindImpl {
1477   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1478       : AANoUnwindImpl(IRP, A) {}
1479 
1480   /// See AbstractAttribute::initialize(...).
1481   void initialize(Attributor &A) override {
1482     AANoUnwindImpl::initialize(A);
1483     Function *F = getAssociatedFunction();
1484     if (!F || F->isDeclaration())
1485       indicatePessimisticFixpoint();
1486   }
1487 
1488   /// See AbstractAttribute::updateImpl(...).
1489   ChangeStatus updateImpl(Attributor &A) override {
1490     // TODO: Once we have call site specific value information we can provide
1491     //       call site specific liveness information and then it makes
1492     //       sense to specialize attributes for call sites arguments instead of
1493     //       redirecting requests to the callee argument.
1494     Function *F = getAssociatedFunction();
1495     const IRPosition &FnPos = IRPosition::function(*F);
1496     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1497     return clampStateAndIndicateChange(getState(), FnAA.getState());
1498   }
1499 
1500   /// See AbstractAttribute::trackStatistics()
1501   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1502 };
1503 
1504 /// --------------------- Function Return Values -------------------------------
1505 
1506 /// "Attribute" that collects all potential returned values and the return
1507 /// instructions that they arise from.
1508 ///
1509 /// If there is a unique returned value R, the manifest method will:
1510 ///   - mark R with the "returned" attribute, if R is an argument.
1511 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1512 
1513   /// Mapping of values potentially returned by the associated function to the
1514   /// return instructions that might return them.
1515   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1516 
1517   /// State flags
1518   ///
1519   ///{
1520   bool IsFixed = false;
1521   bool IsValidState = true;
1522   ///}
1523 
1524 public:
1525   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1526       : AAReturnedValues(IRP, A) {}
1527 
1528   /// See AbstractAttribute::initialize(...).
1529   void initialize(Attributor &A) override {
1530     // Reset the state.
1531     IsFixed = false;
1532     IsValidState = true;
1533     ReturnedValues.clear();
1534 
1535     Function *F = getAssociatedFunction();
1536     if (!F || F->isDeclaration()) {
1537       indicatePessimisticFixpoint();
1538       return;
1539     }
1540     assert(!F->getReturnType()->isVoidTy() &&
1541            "Did not expect a void return type!");
1542 
1543     // The map from instruction opcodes to those instructions in the function.
1544     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1545 
1546     // Look through all arguments, if one is marked as returned we are done.
1547     for (Argument &Arg : F->args()) {
1548       if (Arg.hasReturnedAttr()) {
1549         auto &ReturnInstSet = ReturnedValues[&Arg];
1550         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1551           for (Instruction *RI : *Insts)
1552             ReturnInstSet.insert(cast<ReturnInst>(RI));
1553 
1554         indicateOptimisticFixpoint();
1555         return;
1556       }
1557     }
1558 
1559     if (!A.isFunctionIPOAmendable(*F))
1560       indicatePessimisticFixpoint();
1561   }
1562 
1563   /// See AbstractAttribute::manifest(...).
1564   ChangeStatus manifest(Attributor &A) override;
1565 
1566   /// See AbstractAttribute::getState(...).
1567   AbstractState &getState() override { return *this; }
1568 
1569   /// See AbstractAttribute::getState(...).
1570   const AbstractState &getState() const override { return *this; }
1571 
1572   /// See AbstractAttribute::updateImpl(Attributor &A).
1573   ChangeStatus updateImpl(Attributor &A) override;
1574 
1575   llvm::iterator_range<iterator> returned_values() override {
1576     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1577   }
1578 
1579   llvm::iterator_range<const_iterator> returned_values() const override {
1580     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1581   }
1582 
1583   /// Return the number of potential return values, -1 if unknown.
1584   size_t getNumReturnValues() const override {
1585     return isValidState() ? ReturnedValues.size() : -1;
1586   }
1587 
1588   /// Return an assumed unique return value if a single candidate is found. If
1589   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1590   /// Optional::NoneType.
1591   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1592 
1593   /// See AbstractState::checkForAllReturnedValues(...).
1594   bool checkForAllReturnedValuesAndReturnInsts(
1595       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1596       const override;
1597 
1598   /// Pretty print the attribute similar to the IR representation.
1599   const std::string getAsStr() const override;
1600 
1601   /// See AbstractState::isAtFixpoint().
1602   bool isAtFixpoint() const override { return IsFixed; }
1603 
1604   /// See AbstractState::isValidState().
1605   bool isValidState() const override { return IsValidState; }
1606 
1607   /// See AbstractState::indicateOptimisticFixpoint(...).
1608   ChangeStatus indicateOptimisticFixpoint() override {
1609     IsFixed = true;
1610     return ChangeStatus::UNCHANGED;
1611   }
1612 
1613   ChangeStatus indicatePessimisticFixpoint() override {
1614     IsFixed = true;
1615     IsValidState = false;
1616     return ChangeStatus::CHANGED;
1617   }
1618 };
1619 
1620 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1621   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1622 
1623   // Bookkeeping.
1624   assert(isValidState());
1625   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1626                   "Number of function with known return values");
1627 
1628   // Check if we have an assumed unique return value that we could manifest.
1629   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1630 
1631   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1632     return Changed;
1633 
1634   // Bookkeeping.
1635   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1636                   "Number of function with unique return");
1637   // If the assumed unique return value is an argument, annotate it.
1638   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1639     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1640             getAssociatedFunction()->getReturnType())) {
1641       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1642       Changed = IRAttribute::manifest(A);
1643     }
1644   }
1645   return Changed;
1646 }
1647 
1648 const std::string AAReturnedValuesImpl::getAsStr() const {
1649   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1650          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1651 }
1652 
1653 Optional<Value *>
1654 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1655   // If checkForAllReturnedValues provides a unique value, ignoring potential
1656   // undef values that can also be present, it is assumed to be the actual
1657   // return value and forwarded to the caller of this method. If there are
1658   // multiple, a nullptr is returned indicating there cannot be a unique
1659   // returned value.
1660   Optional<Value *> UniqueRV;
1661   Type *Ty = getAssociatedFunction()->getReturnType();
1662 
1663   auto Pred = [&](Value &RV) -> bool {
1664     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1665     return UniqueRV != Optional<Value *>(nullptr);
1666   };
1667 
1668   if (!A.checkForAllReturnedValues(Pred, *this))
1669     UniqueRV = nullptr;
1670 
1671   return UniqueRV;
1672 }
1673 
1674 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1675     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1676     const {
1677   if (!isValidState())
1678     return false;
1679 
1680   // Check all returned values but ignore call sites as long as we have not
1681   // encountered an overdefined one during an update.
1682   for (auto &It : ReturnedValues) {
1683     Value *RV = It.first;
1684     if (!Pred(*RV, It.second))
1685       return false;
1686   }
1687 
1688   return true;
1689 }
1690 
1691 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1692   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1693 
1694   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1695                            bool) -> bool {
1696     bool UsedAssumedInformation = false;
1697     Optional<Value *> SimpleRetVal =
1698         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1699     if (!SimpleRetVal.hasValue())
1700       return true;
1701     if (!SimpleRetVal.getValue())
1702       return false;
1703     Value *RetVal = *SimpleRetVal;
1704     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1705            "Assumed returned value should be valid in function scope!");
1706     if (ReturnedValues[RetVal].insert(&Ret))
1707       Changed = ChangeStatus::CHANGED;
1708     return true;
1709   };
1710 
1711   auto ReturnInstCB = [&](Instruction &I) {
1712     ReturnInst &Ret = cast<ReturnInst>(I);
1713     return genericValueTraversal<ReturnInst>(
1714         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1715         &I);
1716   };
1717 
1718   // Discover returned values from all live returned instructions in the
1719   // associated function.
1720   bool UsedAssumedInformation = false;
1721   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1722                                  UsedAssumedInformation))
1723     return indicatePessimisticFixpoint();
1724   return Changed;
1725 }
1726 
1727 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1728   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1729       : AAReturnedValuesImpl(IRP, A) {}
1730 
1731   /// See AbstractAttribute::trackStatistics()
1732   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1733 };
1734 
1735 /// Returned values information for a call sites.
1736 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1737   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1738       : AAReturnedValuesImpl(IRP, A) {}
1739 
1740   /// See AbstractAttribute::initialize(...).
1741   void initialize(Attributor &A) override {
1742     // TODO: Once we have call site specific value information we can provide
1743     //       call site specific liveness information and then it makes
1744     //       sense to specialize attributes for call sites instead of
1745     //       redirecting requests to the callee.
1746     llvm_unreachable("Abstract attributes for returned values are not "
1747                      "supported for call sites yet!");
1748   }
1749 
1750   /// See AbstractAttribute::updateImpl(...).
1751   ChangeStatus updateImpl(Attributor &A) override {
1752     return indicatePessimisticFixpoint();
1753   }
1754 
1755   /// See AbstractAttribute::trackStatistics()
1756   void trackStatistics() const override {}
1757 };
1758 
1759 /// ------------------------ NoSync Function Attribute -------------------------
1760 
1761 struct AANoSyncImpl : AANoSync {
1762   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1763 
1764   const std::string getAsStr() const override {
1765     return getAssumed() ? "nosync" : "may-sync";
1766   }
1767 
1768   /// See AbstractAttribute::updateImpl(...).
1769   ChangeStatus updateImpl(Attributor &A) override;
1770 
1771   /// Helper function used to determine whether an instruction is non-relaxed
1772   /// atomic. In other words, if an atomic instruction does not have unordered
1773   /// or monotonic ordering
1774   static bool isNonRelaxedAtomic(Instruction *I);
1775 
1776   /// Helper function specific for intrinsics which are potentially volatile
1777   static bool isNoSyncIntrinsic(Instruction *I);
1778 };
1779 
1780 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1781   if (!I->isAtomic())
1782     return false;
1783 
1784   if (auto *FI = dyn_cast<FenceInst>(I))
1785     // All legal orderings for fence are stronger than monotonic.
1786     return FI->getSyncScopeID() != SyncScope::SingleThread;
1787   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1788     // Unordered is not a legal ordering for cmpxchg.
1789     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1790             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1791   }
1792 
1793   AtomicOrdering Ordering;
1794   switch (I->getOpcode()) {
1795   case Instruction::AtomicRMW:
1796     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1797     break;
1798   case Instruction::Store:
1799     Ordering = cast<StoreInst>(I)->getOrdering();
1800     break;
1801   case Instruction::Load:
1802     Ordering = cast<LoadInst>(I)->getOrdering();
1803     break;
1804   default:
1805     llvm_unreachable(
1806         "New atomic operations need to be known in the attributor.");
1807   }
1808 
1809   return (Ordering != AtomicOrdering::Unordered &&
1810           Ordering != AtomicOrdering::Monotonic);
1811 }
1812 
1813 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1814 /// which would be nosync except that they have a volatile flag.  All other
1815 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1816 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1817   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1818     return !MI->isVolatile();
1819   return false;
1820 }
1821 
1822 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1823 
1824   auto CheckRWInstForNoSync = [&](Instruction &I) {
1825     /// We are looking for volatile instructions or Non-Relaxed atomics.
1826 
1827     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1828       if (CB->hasFnAttr(Attribute::NoSync))
1829         return true;
1830 
1831       if (isNoSyncIntrinsic(&I))
1832         return true;
1833 
1834       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1835           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1836       return NoSyncAA.isAssumedNoSync();
1837     }
1838 
1839     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1840       return true;
1841 
1842     return false;
1843   };
1844 
1845   auto CheckForNoSync = [&](Instruction &I) {
1846     // At this point we handled all read/write effects and they are all
1847     // nosync, so they can be skipped.
1848     if (I.mayReadOrWriteMemory())
1849       return true;
1850 
1851     // non-convergent and readnone imply nosync.
1852     return !cast<CallBase>(I).isConvergent();
1853   };
1854 
1855   bool UsedAssumedInformation = false;
1856   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1857                                           UsedAssumedInformation) ||
1858       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1859                                          UsedAssumedInformation))
1860     return indicatePessimisticFixpoint();
1861 
1862   return ChangeStatus::UNCHANGED;
1863 }
1864 
1865 struct AANoSyncFunction final : public AANoSyncImpl {
1866   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1867       : AANoSyncImpl(IRP, A) {}
1868 
1869   /// See AbstractAttribute::trackStatistics()
1870   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1871 };
1872 
1873 /// NoSync attribute deduction for a call sites.
1874 struct AANoSyncCallSite final : AANoSyncImpl {
1875   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1876       : AANoSyncImpl(IRP, A) {}
1877 
1878   /// See AbstractAttribute::initialize(...).
1879   void initialize(Attributor &A) override {
1880     AANoSyncImpl::initialize(A);
1881     Function *F = getAssociatedFunction();
1882     if (!F || F->isDeclaration())
1883       indicatePessimisticFixpoint();
1884   }
1885 
1886   /// See AbstractAttribute::updateImpl(...).
1887   ChangeStatus updateImpl(Attributor &A) override {
1888     // TODO: Once we have call site specific value information we can provide
1889     //       call site specific liveness information and then it makes
1890     //       sense to specialize attributes for call sites arguments instead of
1891     //       redirecting requests to the callee argument.
1892     Function *F = getAssociatedFunction();
1893     const IRPosition &FnPos = IRPosition::function(*F);
1894     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1895     return clampStateAndIndicateChange(getState(), FnAA.getState());
1896   }
1897 
1898   /// See AbstractAttribute::trackStatistics()
1899   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1900 };
1901 
1902 /// ------------------------ No-Free Attributes ----------------------------
1903 
1904 struct AANoFreeImpl : public AANoFree {
1905   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     auto CheckForNoFree = [&](Instruction &I) {
1910       const auto &CB = cast<CallBase>(I);
1911       if (CB.hasFnAttr(Attribute::NoFree))
1912         return true;
1913 
1914       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1915           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1916       return NoFreeAA.isAssumedNoFree();
1917     };
1918 
1919     bool UsedAssumedInformation = false;
1920     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1921                                            UsedAssumedInformation))
1922       return indicatePessimisticFixpoint();
1923     return ChangeStatus::UNCHANGED;
1924   }
1925 
1926   /// See AbstractAttribute::getAsStr().
1927   const std::string getAsStr() const override {
1928     return getAssumed() ? "nofree" : "may-free";
1929   }
1930 };
1931 
1932 struct AANoFreeFunction final : public AANoFreeImpl {
1933   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1934       : AANoFreeImpl(IRP, A) {}
1935 
1936   /// See AbstractAttribute::trackStatistics()
1937   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1938 };
1939 
1940 /// NoFree attribute deduction for a call sites.
1941 struct AANoFreeCallSite final : AANoFreeImpl {
1942   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1943       : AANoFreeImpl(IRP, A) {}
1944 
1945   /// See AbstractAttribute::initialize(...).
1946   void initialize(Attributor &A) override {
1947     AANoFreeImpl::initialize(A);
1948     Function *F = getAssociatedFunction();
1949     if (!F || F->isDeclaration())
1950       indicatePessimisticFixpoint();
1951   }
1952 
1953   /// See AbstractAttribute::updateImpl(...).
1954   ChangeStatus updateImpl(Attributor &A) override {
1955     // TODO: Once we have call site specific value information we can provide
1956     //       call site specific liveness information and then it makes
1957     //       sense to specialize attributes for call sites arguments instead of
1958     //       redirecting requests to the callee argument.
1959     Function *F = getAssociatedFunction();
1960     const IRPosition &FnPos = IRPosition::function(*F);
1961     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1962     return clampStateAndIndicateChange(getState(), FnAA.getState());
1963   }
1964 
1965   /// See AbstractAttribute::trackStatistics()
1966   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1967 };
1968 
1969 /// NoFree attribute for floating values.
1970 struct AANoFreeFloating : AANoFreeImpl {
1971   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1972       : AANoFreeImpl(IRP, A) {}
1973 
1974   /// See AbstractAttribute::trackStatistics()
1975   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1976 
1977   /// See Abstract Attribute::updateImpl(...).
1978   ChangeStatus updateImpl(Attributor &A) override {
1979     const IRPosition &IRP = getIRPosition();
1980 
1981     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1982         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1983     if (NoFreeAA.isAssumedNoFree())
1984       return ChangeStatus::UNCHANGED;
1985 
1986     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1987     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1988       Instruction *UserI = cast<Instruction>(U.getUser());
1989       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1990         if (CB->isBundleOperand(&U))
1991           return false;
1992         if (!CB->isArgOperand(&U))
1993           return true;
1994         unsigned ArgNo = CB->getArgOperandNo(&U);
1995 
1996         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1997             *this, IRPosition::callsite_argument(*CB, ArgNo),
1998             DepClassTy::REQUIRED);
1999         return NoFreeArg.isAssumedNoFree();
2000       }
2001 
2002       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2003           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2004         Follow = true;
2005         return true;
2006       }
2007       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2008           isa<ReturnInst>(UserI))
2009         return true;
2010 
2011       // Unknown user.
2012       return false;
2013     };
2014     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2015       return indicatePessimisticFixpoint();
2016 
2017     return ChangeStatus::UNCHANGED;
2018   }
2019 };
2020 
2021 /// NoFree attribute for a call site argument.
2022 struct AANoFreeArgument final : AANoFreeFloating {
2023   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2024       : AANoFreeFloating(IRP, A) {}
2025 
2026   /// See AbstractAttribute::trackStatistics()
2027   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2028 };
2029 
2030 /// NoFree attribute for call site arguments.
2031 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2032   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2033       : AANoFreeFloating(IRP, A) {}
2034 
2035   /// See AbstractAttribute::updateImpl(...).
2036   ChangeStatus updateImpl(Attributor &A) override {
2037     // TODO: Once we have call site specific value information we can provide
2038     //       call site specific liveness information and then it makes
2039     //       sense to specialize attributes for call sites arguments instead of
2040     //       redirecting requests to the callee argument.
2041     Argument *Arg = getAssociatedArgument();
2042     if (!Arg)
2043       return indicatePessimisticFixpoint();
2044     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2045     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2046     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2047   }
2048 
2049   /// See AbstractAttribute::trackStatistics()
2050   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2051 };
2052 
2053 /// NoFree attribute for function return value.
2054 struct AANoFreeReturned final : AANoFreeFloating {
2055   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2056       : AANoFreeFloating(IRP, A) {
2057     llvm_unreachable("NoFree is not applicable to function returns!");
2058   }
2059 
2060   /// See AbstractAttribute::initialize(...).
2061   void initialize(Attributor &A) override {
2062     llvm_unreachable("NoFree is not applicable to function returns!");
2063   }
2064 
2065   /// See AbstractAttribute::updateImpl(...).
2066   ChangeStatus updateImpl(Attributor &A) override {
2067     llvm_unreachable("NoFree is not applicable to function returns!");
2068   }
2069 
2070   /// See AbstractAttribute::trackStatistics()
2071   void trackStatistics() const override {}
2072 };
2073 
2074 /// NoFree attribute deduction for a call site return value.
2075 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2076   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2077       : AANoFreeFloating(IRP, A) {}
2078 
2079   ChangeStatus manifest(Attributor &A) override {
2080     return ChangeStatus::UNCHANGED;
2081   }
2082   /// See AbstractAttribute::trackStatistics()
2083   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2084 };
2085 
2086 /// ------------------------ NonNull Argument Attribute ------------------------
2087 static int64_t getKnownNonNullAndDerefBytesForUse(
2088     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2089     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2090   TrackUse = false;
2091 
2092   const Value *UseV = U->get();
2093   if (!UseV->getType()->isPointerTy())
2094     return 0;
2095 
2096   // We need to follow common pointer manipulation uses to the accesses they
2097   // feed into. We can try to be smart to avoid looking through things we do not
2098   // like for now, e.g., non-inbounds GEPs.
2099   if (isa<CastInst>(I)) {
2100     TrackUse = true;
2101     return 0;
2102   }
2103 
2104   if (isa<GetElementPtrInst>(I)) {
2105     TrackUse = true;
2106     return 0;
2107   }
2108 
2109   Type *PtrTy = UseV->getType();
2110   const Function *F = I->getFunction();
2111   bool NullPointerIsDefined =
2112       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2113   const DataLayout &DL = A.getInfoCache().getDL();
2114   if (const auto *CB = dyn_cast<CallBase>(I)) {
2115     if (CB->isBundleOperand(U)) {
2116       if (RetainedKnowledge RK = getKnowledgeFromUse(
2117               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2118         IsNonNull |=
2119             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2120         return RK.ArgValue;
2121       }
2122       return 0;
2123     }
2124 
2125     if (CB->isCallee(U)) {
2126       IsNonNull |= !NullPointerIsDefined;
2127       return 0;
2128     }
2129 
2130     unsigned ArgNo = CB->getArgOperandNo(U);
2131     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2132     // As long as we only use known information there is no need to track
2133     // dependences here.
2134     auto &DerefAA =
2135         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2136     IsNonNull |= DerefAA.isKnownNonNull();
2137     return DerefAA.getKnownDereferenceableBytes();
2138   }
2139 
2140   int64_t Offset;
2141   const Value *Base =
2142       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2143   if (Base) {
2144     if (Base == &AssociatedValue &&
2145         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2146       int64_t DerefBytes =
2147           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2148 
2149       IsNonNull |= !NullPointerIsDefined;
2150       return std::max(int64_t(0), DerefBytes);
2151     }
2152   }
2153 
2154   /// Corner case when an offset is 0.
2155   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2156                                               /*AllowNonInbounds*/ true);
2157   if (Base) {
2158     if (Offset == 0 && Base == &AssociatedValue &&
2159         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2160       int64_t DerefBytes =
2161           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2162       IsNonNull |= !NullPointerIsDefined;
2163       return std::max(int64_t(0), DerefBytes);
2164     }
2165   }
2166 
2167   return 0;
2168 }
2169 
2170 struct AANonNullImpl : AANonNull {
2171   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2172       : AANonNull(IRP, A),
2173         NullIsDefined(NullPointerIsDefined(
2174             getAnchorScope(),
2175             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2176 
2177   /// See AbstractAttribute::initialize(...).
2178   void initialize(Attributor &A) override {
2179     Value &V = getAssociatedValue();
2180     if (!NullIsDefined &&
2181         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2182                 /* IgnoreSubsumingPositions */ false, &A)) {
2183       indicateOptimisticFixpoint();
2184       return;
2185     }
2186 
2187     if (isa<ConstantPointerNull>(V)) {
2188       indicatePessimisticFixpoint();
2189       return;
2190     }
2191 
2192     AANonNull::initialize(A);
2193 
2194     bool CanBeNull, CanBeFreed;
2195     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2196                                          CanBeFreed)) {
2197       if (!CanBeNull) {
2198         indicateOptimisticFixpoint();
2199         return;
2200       }
2201     }
2202 
2203     if (isa<GlobalValue>(&getAssociatedValue())) {
2204       indicatePessimisticFixpoint();
2205       return;
2206     }
2207 
2208     if (Instruction *CtxI = getCtxI())
2209       followUsesInMBEC(*this, A, getState(), *CtxI);
2210   }
2211 
2212   /// See followUsesInMBEC
2213   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2214                        AANonNull::StateType &State) {
2215     bool IsNonNull = false;
2216     bool TrackUse = false;
2217     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2218                                        IsNonNull, TrackUse);
2219     State.setKnown(IsNonNull);
2220     return TrackUse;
2221   }
2222 
2223   /// See AbstractAttribute::getAsStr().
2224   const std::string getAsStr() const override {
2225     return getAssumed() ? "nonnull" : "may-null";
2226   }
2227 
2228   /// Flag to determine if the underlying value can be null and still allow
2229   /// valid accesses.
2230   const bool NullIsDefined;
2231 };
2232 
2233 /// NonNull attribute for a floating value.
2234 struct AANonNullFloating : public AANonNullImpl {
2235   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2236       : AANonNullImpl(IRP, A) {}
2237 
2238   /// See AbstractAttribute::updateImpl(...).
2239   ChangeStatus updateImpl(Attributor &A) override {
2240     const DataLayout &DL = A.getDataLayout();
2241 
2242     DominatorTree *DT = nullptr;
2243     AssumptionCache *AC = nullptr;
2244     InformationCache &InfoCache = A.getInfoCache();
2245     if (const Function *Fn = getAnchorScope()) {
2246       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2247       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2248     }
2249 
2250     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2251                             AANonNull::StateType &T, bool Stripped) -> bool {
2252       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2253                                              DepClassTy::REQUIRED);
2254       if (!Stripped && this == &AA) {
2255         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2256           T.indicatePessimisticFixpoint();
2257       } else {
2258         // Use abstract attribute information.
2259         const AANonNull::StateType &NS = AA.getState();
2260         T ^= NS;
2261       }
2262       return T.isValidState();
2263     };
2264 
2265     StateType T;
2266     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2267                                           VisitValueCB, getCtxI()))
2268       return indicatePessimisticFixpoint();
2269 
2270     return clampStateAndIndicateChange(getState(), T);
2271   }
2272 
2273   /// See AbstractAttribute::trackStatistics()
2274   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2275 };
2276 
2277 /// NonNull attribute for function return value.
2278 struct AANonNullReturned final
2279     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2280   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2281       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2282 
2283   /// See AbstractAttribute::getAsStr().
2284   const std::string getAsStr() const override {
2285     return getAssumed() ? "nonnull" : "may-null";
2286   }
2287 
2288   /// See AbstractAttribute::trackStatistics()
2289   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2290 };
2291 
2292 /// NonNull attribute for function argument.
2293 struct AANonNullArgument final
2294     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2295   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2296       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2297 
2298   /// See AbstractAttribute::trackStatistics()
2299   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2300 };
2301 
2302 struct AANonNullCallSiteArgument final : AANonNullFloating {
2303   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2304       : AANonNullFloating(IRP, A) {}
2305 
2306   /// See AbstractAttribute::trackStatistics()
2307   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2308 };
2309 
2310 /// NonNull attribute for a call site return position.
2311 struct AANonNullCallSiteReturned final
2312     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2313   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2314       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2315 
2316   /// See AbstractAttribute::trackStatistics()
2317   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2318 };
2319 
2320 /// ------------------------ No-Recurse Attributes ----------------------------
2321 
2322 struct AANoRecurseImpl : public AANoRecurse {
2323   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2324 
2325   /// See AbstractAttribute::getAsStr()
2326   const std::string getAsStr() const override {
2327     return getAssumed() ? "norecurse" : "may-recurse";
2328   }
2329 };
2330 
2331 struct AANoRecurseFunction final : AANoRecurseImpl {
2332   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2333       : AANoRecurseImpl(IRP, A) {}
2334 
2335   /// See AbstractAttribute::initialize(...).
2336   void initialize(Attributor &A) override {
2337     AANoRecurseImpl::initialize(A);
2338     if (const Function *F = getAnchorScope())
2339       if (A.getInfoCache().getSccSize(*F) != 1)
2340         indicatePessimisticFixpoint();
2341   }
2342 
2343   /// See AbstractAttribute::updateImpl(...).
2344   ChangeStatus updateImpl(Attributor &A) override {
2345 
2346     // If all live call sites are known to be no-recurse, we are as well.
2347     auto CallSitePred = [&](AbstractCallSite ACS) {
2348       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2349           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2350           DepClassTy::NONE);
2351       return NoRecurseAA.isKnownNoRecurse();
2352     };
2353     bool AllCallSitesKnown;
2354     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2355       // If we know all call sites and all are known no-recurse, we are done.
2356       // If all known call sites, which might not be all that exist, are known
2357       // to be no-recurse, we are not done but we can continue to assume
2358       // no-recurse. If one of the call sites we have not visited will become
2359       // live, another update is triggered.
2360       if (AllCallSitesKnown)
2361         indicateOptimisticFixpoint();
2362       return ChangeStatus::UNCHANGED;
2363     }
2364 
2365     // If the above check does not hold anymore we look at the calls.
2366     auto CheckForNoRecurse = [&](Instruction &I) {
2367       const auto &CB = cast<CallBase>(I);
2368       if (CB.hasFnAttr(Attribute::NoRecurse))
2369         return true;
2370 
2371       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2372           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2373       if (!NoRecurseAA.isAssumedNoRecurse())
2374         return false;
2375 
2376       // Recursion to the same function
2377       if (CB.getCalledFunction() == getAnchorScope())
2378         return false;
2379 
2380       return true;
2381     };
2382 
2383     bool UsedAssumedInformation = false;
2384     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2385                                            UsedAssumedInformation))
2386       return indicatePessimisticFixpoint();
2387     return ChangeStatus::UNCHANGED;
2388   }
2389 
2390   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2391 };
2392 
2393 /// NoRecurse attribute deduction for a call sites.
2394 struct AANoRecurseCallSite final : AANoRecurseImpl {
2395   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2396       : AANoRecurseImpl(IRP, A) {}
2397 
2398   /// See AbstractAttribute::initialize(...).
2399   void initialize(Attributor &A) override {
2400     AANoRecurseImpl::initialize(A);
2401     Function *F = getAssociatedFunction();
2402     if (!F || F->isDeclaration())
2403       indicatePessimisticFixpoint();
2404   }
2405 
2406   /// See AbstractAttribute::updateImpl(...).
2407   ChangeStatus updateImpl(Attributor &A) override {
2408     // TODO: Once we have call site specific value information we can provide
2409     //       call site specific liveness information and then it makes
2410     //       sense to specialize attributes for call sites arguments instead of
2411     //       redirecting requests to the callee argument.
2412     Function *F = getAssociatedFunction();
2413     const IRPosition &FnPos = IRPosition::function(*F);
2414     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2415     return clampStateAndIndicateChange(getState(), FnAA.getState());
2416   }
2417 
2418   /// See AbstractAttribute::trackStatistics()
2419   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2420 };
2421 
2422 /// -------------------- Undefined-Behavior Attributes ------------------------
2423 
2424 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2425   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2426       : AAUndefinedBehavior(IRP, A) {}
2427 
2428   /// See AbstractAttribute::updateImpl(...).
2429   // through a pointer (i.e. also branches etc.)
2430   ChangeStatus updateImpl(Attributor &A) override {
2431     const size_t UBPrevSize = KnownUBInsts.size();
2432     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2433 
2434     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2435       // Lang ref now states volatile store is not UB, let's skip them.
2436       if (I.isVolatile() && I.mayWriteToMemory())
2437         return true;
2438 
2439       // Skip instructions that are already saved.
2440       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2441         return true;
2442 
2443       // If we reach here, we know we have an instruction
2444       // that accesses memory through a pointer operand,
2445       // for which getPointerOperand() should give it to us.
2446       Value *PtrOp =
2447           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2448       assert(PtrOp &&
2449              "Expected pointer operand of memory accessing instruction");
2450 
2451       // Either we stopped and the appropriate action was taken,
2452       // or we got back a simplified value to continue.
2453       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2454       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2455         return true;
2456       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2457 
2458       // A memory access through a pointer is considered UB
2459       // only if the pointer has constant null value.
2460       // TODO: Expand it to not only check constant values.
2461       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2462         AssumedNoUBInsts.insert(&I);
2463         return true;
2464       }
2465       const Type *PtrTy = PtrOpVal->getType();
2466 
2467       // Because we only consider instructions inside functions,
2468       // assume that a parent function exists.
2469       const Function *F = I.getFunction();
2470 
2471       // A memory access using constant null pointer is only considered UB
2472       // if null pointer is _not_ defined for the target platform.
2473       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2474         AssumedNoUBInsts.insert(&I);
2475       else
2476         KnownUBInsts.insert(&I);
2477       return true;
2478     };
2479 
2480     auto InspectBrInstForUB = [&](Instruction &I) {
2481       // A conditional branch instruction is considered UB if it has `undef`
2482       // condition.
2483 
2484       // Skip instructions that are already saved.
2485       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2486         return true;
2487 
2488       // We know we have a branch instruction.
2489       auto *BrInst = cast<BranchInst>(&I);
2490 
2491       // Unconditional branches are never considered UB.
2492       if (BrInst->isUnconditional())
2493         return true;
2494 
2495       // Either we stopped and the appropriate action was taken,
2496       // or we got back a simplified value to continue.
2497       Optional<Value *> SimplifiedCond =
2498           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2499       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2500         return true;
2501       AssumedNoUBInsts.insert(&I);
2502       return true;
2503     };
2504 
2505     auto InspectCallSiteForUB = [&](Instruction &I) {
2506       // Check whether a callsite always cause UB or not
2507 
2508       // Skip instructions that are already saved.
2509       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2510         return true;
2511 
2512       // Check nonnull and noundef argument attribute violation for each
2513       // callsite.
2514       CallBase &CB = cast<CallBase>(I);
2515       Function *Callee = CB.getCalledFunction();
2516       if (!Callee)
2517         return true;
2518       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2519         // If current argument is known to be simplified to null pointer and the
2520         // corresponding argument position is known to have nonnull attribute,
2521         // the argument is poison. Furthermore, if the argument is poison and
2522         // the position is known to have noundef attriubte, this callsite is
2523         // considered UB.
2524         if (idx >= Callee->arg_size())
2525           break;
2526         Value *ArgVal = CB.getArgOperand(idx);
2527         if (!ArgVal)
2528           continue;
2529         // Here, we handle three cases.
2530         //   (1) Not having a value means it is dead. (we can replace the value
2531         //       with undef)
2532         //   (2) Simplified to undef. The argument violate noundef attriubte.
2533         //   (3) Simplified to null pointer where known to be nonnull.
2534         //       The argument is a poison value and violate noundef attribute.
2535         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2536         auto &NoUndefAA =
2537             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2538         if (!NoUndefAA.isKnownNoUndef())
2539           continue;
2540         bool UsedAssumedInformation = false;
2541         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2542             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2543         if (UsedAssumedInformation)
2544           continue;
2545         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2546           return true;
2547         if (!SimplifiedVal.hasValue() ||
2548             isa<UndefValue>(*SimplifiedVal.getValue())) {
2549           KnownUBInsts.insert(&I);
2550           continue;
2551         }
2552         if (!ArgVal->getType()->isPointerTy() ||
2553             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2554           continue;
2555         auto &NonNullAA =
2556             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2557         if (NonNullAA.isKnownNonNull())
2558           KnownUBInsts.insert(&I);
2559       }
2560       return true;
2561     };
2562 
2563     auto InspectReturnInstForUB =
2564         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2565           // Check if a return instruction always cause UB or not
2566           // Note: It is guaranteed that the returned position of the anchor
2567           //       scope has noundef attribute when this is called.
2568           //       We also ensure the return position is not "assumed dead"
2569           //       because the returned value was then potentially simplified to
2570           //       `undef` in AAReturnedValues without removing the `noundef`
2571           //       attribute yet.
2572 
2573           // When the returned position has noundef attriubte, UB occur in the
2574           // following cases.
2575           //   (1) Returned value is known to be undef.
2576           //   (2) The value is known to be a null pointer and the returned
2577           //       position has nonnull attribute (because the returned value is
2578           //       poison).
2579           bool FoundUB = false;
2580           if (isa<UndefValue>(V)) {
2581             FoundUB = true;
2582           } else {
2583             if (isa<ConstantPointerNull>(V)) {
2584               auto &NonNullAA = A.getAAFor<AANonNull>(
2585                   *this, IRPosition::returned(*getAnchorScope()),
2586                   DepClassTy::NONE);
2587               if (NonNullAA.isKnownNonNull())
2588                 FoundUB = true;
2589             }
2590           }
2591 
2592           if (FoundUB)
2593             for (ReturnInst *RI : RetInsts)
2594               KnownUBInsts.insert(RI);
2595           return true;
2596         };
2597 
2598     bool UsedAssumedInformation = false;
2599     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2600                               {Instruction::Load, Instruction::Store,
2601                                Instruction::AtomicCmpXchg,
2602                                Instruction::AtomicRMW},
2603                               UsedAssumedInformation,
2604                               /* CheckBBLivenessOnly */ true);
2605     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2606                               UsedAssumedInformation,
2607                               /* CheckBBLivenessOnly */ true);
2608     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2609                                       UsedAssumedInformation);
2610 
2611     // If the returned position of the anchor scope has noundef attriubte, check
2612     // all returned instructions.
2613     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2614       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2615       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2616         auto &RetPosNoUndefAA =
2617             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2618         if (RetPosNoUndefAA.isKnownNoUndef())
2619           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2620                                                     *this);
2621       }
2622     }
2623 
2624     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2625         UBPrevSize != KnownUBInsts.size())
2626       return ChangeStatus::CHANGED;
2627     return ChangeStatus::UNCHANGED;
2628   }
2629 
2630   bool isKnownToCauseUB(Instruction *I) const override {
2631     return KnownUBInsts.count(I);
2632   }
2633 
2634   bool isAssumedToCauseUB(Instruction *I) const override {
2635     // In simple words, if an instruction is not in the assumed to _not_
2636     // cause UB, then it is assumed UB (that includes those
2637     // in the KnownUBInsts set). The rest is boilerplate
2638     // is to ensure that it is one of the instructions we test
2639     // for UB.
2640 
2641     switch (I->getOpcode()) {
2642     case Instruction::Load:
2643     case Instruction::Store:
2644     case Instruction::AtomicCmpXchg:
2645     case Instruction::AtomicRMW:
2646       return !AssumedNoUBInsts.count(I);
2647     case Instruction::Br: {
2648       auto BrInst = cast<BranchInst>(I);
2649       if (BrInst->isUnconditional())
2650         return false;
2651       return !AssumedNoUBInsts.count(I);
2652     } break;
2653     default:
2654       return false;
2655     }
2656     return false;
2657   }
2658 
2659   ChangeStatus manifest(Attributor &A) override {
2660     if (KnownUBInsts.empty())
2661       return ChangeStatus::UNCHANGED;
2662     for (Instruction *I : KnownUBInsts)
2663       A.changeToUnreachableAfterManifest(I);
2664     return ChangeStatus::CHANGED;
2665   }
2666 
2667   /// See AbstractAttribute::getAsStr()
2668   const std::string getAsStr() const override {
2669     return getAssumed() ? "undefined-behavior" : "no-ub";
2670   }
2671 
2672   /// Note: The correctness of this analysis depends on the fact that the
2673   /// following 2 sets will stop changing after some point.
2674   /// "Change" here means that their size changes.
2675   /// The size of each set is monotonically increasing
2676   /// (we only add items to them) and it is upper bounded by the number of
2677   /// instructions in the processed function (we can never save more
2678   /// elements in either set than this number). Hence, at some point,
2679   /// they will stop increasing.
2680   /// Consequently, at some point, both sets will have stopped
2681   /// changing, effectively making the analysis reach a fixpoint.
2682 
2683   /// Note: These 2 sets are disjoint and an instruction can be considered
2684   /// one of 3 things:
2685   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2686   ///    the KnownUBInsts set.
2687   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2688   ///    has a reason to assume it).
2689   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2690   ///    could not find a reason to assume or prove that it can cause UB,
2691   ///    hence it assumes it doesn't. We have a set for these instructions
2692   ///    so that we don't reprocess them in every update.
2693   ///    Note however that instructions in this set may cause UB.
2694 
2695 protected:
2696   /// A set of all live instructions _known_ to cause UB.
2697   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2698 
2699 private:
2700   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2701   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2702 
2703   // Should be called on updates in which if we're processing an instruction
2704   // \p I that depends on a value \p V, one of the following has to happen:
2705   // - If the value is assumed, then stop.
2706   // - If the value is known but undef, then consider it UB.
2707   // - Otherwise, do specific processing with the simplified value.
2708   // We return None in the first 2 cases to signify that an appropriate
2709   // action was taken and the caller should stop.
2710   // Otherwise, we return the simplified value that the caller should
2711   // use for specific processing.
2712   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2713                                          Instruction *I) {
2714     bool UsedAssumedInformation = false;
2715     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2716         IRPosition::value(*V), *this, UsedAssumedInformation);
2717     if (!UsedAssumedInformation) {
2718       // Don't depend on assumed values.
2719       if (!SimplifiedV.hasValue()) {
2720         // If it is known (which we tested above) but it doesn't have a value,
2721         // then we can assume `undef` and hence the instruction is UB.
2722         KnownUBInsts.insert(I);
2723         return llvm::None;
2724       }
2725       if (!SimplifiedV.getValue())
2726         return nullptr;
2727       V = *SimplifiedV;
2728     }
2729     if (isa<UndefValue>(V)) {
2730       KnownUBInsts.insert(I);
2731       return llvm::None;
2732     }
2733     return V;
2734   }
2735 };
2736 
2737 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2738   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2739       : AAUndefinedBehaviorImpl(IRP, A) {}
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override {
2743     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2744                "Number of instructions known to have UB");
2745     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2746         KnownUBInsts.size();
2747   }
2748 };
2749 
2750 /// ------------------------ Will-Return Attributes ----------------------------
2751 
2752 // Helper function that checks whether a function has any cycle which we don't
2753 // know if it is bounded or not.
2754 // Loops with maximum trip count are considered bounded, any other cycle not.
2755 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2756   ScalarEvolution *SE =
2757       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2758   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2759   // If either SCEV or LoopInfo is not available for the function then we assume
2760   // any cycle to be unbounded cycle.
2761   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2762   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2763   if (!SE || !LI) {
2764     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2765       if (SCCI.hasCycle())
2766         return true;
2767     return false;
2768   }
2769 
2770   // If there's irreducible control, the function may contain non-loop cycles.
2771   if (mayContainIrreducibleControl(F, LI))
2772     return true;
2773 
2774   // Any loop that does not have a max trip count is considered unbounded cycle.
2775   for (auto *L : LI->getLoopsInPreorder()) {
2776     if (!SE->getSmallConstantMaxTripCount(L))
2777       return true;
2778   }
2779   return false;
2780 }
2781 
2782 struct AAWillReturnImpl : public AAWillReturn {
2783   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2784       : AAWillReturn(IRP, A) {}
2785 
2786   /// See AbstractAttribute::initialize(...).
2787   void initialize(Attributor &A) override {
2788     AAWillReturn::initialize(A);
2789 
2790     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2791       indicateOptimisticFixpoint();
2792       return;
2793     }
2794   }
2795 
2796   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2797   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2798     // Check for `mustprogress` in the scope and the associated function which
2799     // might be different if this is a call site.
2800     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2801         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2802       return false;
2803 
2804     const auto &MemAA =
2805         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2806     if (!MemAA.isAssumedReadOnly())
2807       return false;
2808     if (KnownOnly && !MemAA.isKnownReadOnly())
2809       return false;
2810     if (!MemAA.isKnownReadOnly())
2811       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2812 
2813     return true;
2814   }
2815 
2816   /// See AbstractAttribute::updateImpl(...).
2817   ChangeStatus updateImpl(Attributor &A) override {
2818     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2819       return ChangeStatus::UNCHANGED;
2820 
2821     auto CheckForWillReturn = [&](Instruction &I) {
2822       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2823       const auto &WillReturnAA =
2824           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2825       if (WillReturnAA.isKnownWillReturn())
2826         return true;
2827       if (!WillReturnAA.isAssumedWillReturn())
2828         return false;
2829       const auto &NoRecurseAA =
2830           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2831       return NoRecurseAA.isAssumedNoRecurse();
2832     };
2833 
2834     bool UsedAssumedInformation = false;
2835     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2836                                            UsedAssumedInformation))
2837       return indicatePessimisticFixpoint();
2838 
2839     return ChangeStatus::UNCHANGED;
2840   }
2841 
2842   /// See AbstractAttribute::getAsStr()
2843   const std::string getAsStr() const override {
2844     return getAssumed() ? "willreturn" : "may-noreturn";
2845   }
2846 };
2847 
2848 struct AAWillReturnFunction final : AAWillReturnImpl {
2849   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2850       : AAWillReturnImpl(IRP, A) {}
2851 
2852   /// See AbstractAttribute::initialize(...).
2853   void initialize(Attributor &A) override {
2854     AAWillReturnImpl::initialize(A);
2855 
2856     Function *F = getAnchorScope();
2857     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2858       indicatePessimisticFixpoint();
2859   }
2860 
2861   /// See AbstractAttribute::trackStatistics()
2862   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2863 };
2864 
2865 /// WillReturn attribute deduction for a call sites.
2866 struct AAWillReturnCallSite final : AAWillReturnImpl {
2867   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2868       : AAWillReturnImpl(IRP, A) {}
2869 
2870   /// See AbstractAttribute::initialize(...).
2871   void initialize(Attributor &A) override {
2872     AAWillReturnImpl::initialize(A);
2873     Function *F = getAssociatedFunction();
2874     if (!F || !A.isFunctionIPOAmendable(*F))
2875       indicatePessimisticFixpoint();
2876   }
2877 
2878   /// See AbstractAttribute::updateImpl(...).
2879   ChangeStatus updateImpl(Attributor &A) override {
2880     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2881       return ChangeStatus::UNCHANGED;
2882 
2883     // TODO: Once we have call site specific value information we can provide
2884     //       call site specific liveness information and then it makes
2885     //       sense to specialize attributes for call sites arguments instead of
2886     //       redirecting requests to the callee argument.
2887     Function *F = getAssociatedFunction();
2888     const IRPosition &FnPos = IRPosition::function(*F);
2889     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2890     return clampStateAndIndicateChange(getState(), FnAA.getState());
2891   }
2892 
2893   /// See AbstractAttribute::trackStatistics()
2894   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2895 };
2896 
2897 /// -------------------AAReachability Attribute--------------------------
2898 
2899 struct AAReachabilityImpl : AAReachability {
2900   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2901       : AAReachability(IRP, A) {}
2902 
2903   const std::string getAsStr() const override {
2904     // TODO: Return the number of reachable queries.
2905     return "reachable";
2906   }
2907 
2908   /// See AbstractAttribute::updateImpl(...).
2909   ChangeStatus updateImpl(Attributor &A) override {
2910     return ChangeStatus::UNCHANGED;
2911   }
2912 };
2913 
2914 struct AAReachabilityFunction final : public AAReachabilityImpl {
2915   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2916       : AAReachabilityImpl(IRP, A) {}
2917 
2918   /// See AbstractAttribute::trackStatistics()
2919   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2920 };
2921 
2922 /// ------------------------ NoAlias Argument Attribute ------------------------
2923 
2924 struct AANoAliasImpl : AANoAlias {
2925   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2926     assert(getAssociatedType()->isPointerTy() &&
2927            "Noalias is a pointer attribute");
2928   }
2929 
2930   const std::string getAsStr() const override {
2931     return getAssumed() ? "noalias" : "may-alias";
2932   }
2933 };
2934 
2935 /// NoAlias attribute for a floating value.
2936 struct AANoAliasFloating final : AANoAliasImpl {
2937   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2938       : AANoAliasImpl(IRP, A) {}
2939 
2940   /// See AbstractAttribute::initialize(...).
2941   void initialize(Attributor &A) override {
2942     AANoAliasImpl::initialize(A);
2943     Value *Val = &getAssociatedValue();
2944     do {
2945       CastInst *CI = dyn_cast<CastInst>(Val);
2946       if (!CI)
2947         break;
2948       Value *Base = CI->getOperand(0);
2949       if (!Base->hasOneUse())
2950         break;
2951       Val = Base;
2952     } while (true);
2953 
2954     if (!Val->getType()->isPointerTy()) {
2955       indicatePessimisticFixpoint();
2956       return;
2957     }
2958 
2959     if (isa<AllocaInst>(Val))
2960       indicateOptimisticFixpoint();
2961     else if (isa<ConstantPointerNull>(Val) &&
2962              !NullPointerIsDefined(getAnchorScope(),
2963                                    Val->getType()->getPointerAddressSpace()))
2964       indicateOptimisticFixpoint();
2965     else if (Val != &getAssociatedValue()) {
2966       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2967           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2968       if (ValNoAliasAA.isKnownNoAlias())
2969         indicateOptimisticFixpoint();
2970     }
2971   }
2972 
2973   /// See AbstractAttribute::updateImpl(...).
2974   ChangeStatus updateImpl(Attributor &A) override {
2975     // TODO: Implement this.
2976     return indicatePessimisticFixpoint();
2977   }
2978 
2979   /// See AbstractAttribute::trackStatistics()
2980   void trackStatistics() const override {
2981     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2982   }
2983 };
2984 
2985 /// NoAlias attribute for an argument.
2986 struct AANoAliasArgument final
2987     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2988   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2989   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2990 
2991   /// See AbstractAttribute::initialize(...).
2992   void initialize(Attributor &A) override {
2993     Base::initialize(A);
2994     // See callsite argument attribute and callee argument attribute.
2995     if (hasAttr({Attribute::ByVal}))
2996       indicateOptimisticFixpoint();
2997   }
2998 
2999   /// See AbstractAttribute::update(...).
3000   ChangeStatus updateImpl(Attributor &A) override {
3001     // We have to make sure no-alias on the argument does not break
3002     // synchronization when this is a callback argument, see also [1] below.
3003     // If synchronization cannot be affected, we delegate to the base updateImpl
3004     // function, otherwise we give up for now.
3005 
3006     // If the function is no-sync, no-alias cannot break synchronization.
3007     const auto &NoSyncAA =
3008         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3009                              DepClassTy::OPTIONAL);
3010     if (NoSyncAA.isAssumedNoSync())
3011       return Base::updateImpl(A);
3012 
3013     // If the argument is read-only, no-alias cannot break synchronization.
3014     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3015         *this, getIRPosition(), DepClassTy::OPTIONAL);
3016     if (MemBehaviorAA.isAssumedReadOnly())
3017       return Base::updateImpl(A);
3018 
3019     // If the argument is never passed through callbacks, no-alias cannot break
3020     // synchronization.
3021     bool AllCallSitesKnown;
3022     if (A.checkForAllCallSites(
3023             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3024             true, AllCallSitesKnown))
3025       return Base::updateImpl(A);
3026 
3027     // TODO: add no-alias but make sure it doesn't break synchronization by
3028     // introducing fake uses. See:
3029     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3030     //     International Workshop on OpenMP 2018,
3031     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3032 
3033     return indicatePessimisticFixpoint();
3034   }
3035 
3036   /// See AbstractAttribute::trackStatistics()
3037   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3038 };
3039 
3040 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3041   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3042       : AANoAliasImpl(IRP, A) {}
3043 
3044   /// See AbstractAttribute::initialize(...).
3045   void initialize(Attributor &A) override {
3046     // See callsite argument attribute and callee argument attribute.
3047     const auto &CB = cast<CallBase>(getAnchorValue());
3048     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3049       indicateOptimisticFixpoint();
3050     Value &Val = getAssociatedValue();
3051     if (isa<ConstantPointerNull>(Val) &&
3052         !NullPointerIsDefined(getAnchorScope(),
3053                               Val.getType()->getPointerAddressSpace()))
3054       indicateOptimisticFixpoint();
3055   }
3056 
3057   /// Determine if the underlying value may alias with the call site argument
3058   /// \p OtherArgNo of \p ICS (= the underlying call site).
3059   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3060                             const AAMemoryBehavior &MemBehaviorAA,
3061                             const CallBase &CB, unsigned OtherArgNo) {
3062     // We do not need to worry about aliasing with the underlying IRP.
3063     if (this->getCalleeArgNo() == (int)OtherArgNo)
3064       return false;
3065 
3066     // If it is not a pointer or pointer vector we do not alias.
3067     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3068     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3069       return false;
3070 
3071     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3072         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3073 
3074     // If the argument is readnone, there is no read-write aliasing.
3075     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3076       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3077       return false;
3078     }
3079 
3080     // If the argument is readonly and the underlying value is readonly, there
3081     // is no read-write aliasing.
3082     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3083     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3084       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3085       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3086       return false;
3087     }
3088 
3089     // We have to utilize actual alias analysis queries so we need the object.
3090     if (!AAR)
3091       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3092 
3093     // Try to rule it out at the call site.
3094     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3095     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3096                          "callsite arguments: "
3097                       << getAssociatedValue() << " " << *ArgOp << " => "
3098                       << (IsAliasing ? "" : "no-") << "alias \n");
3099 
3100     return IsAliasing;
3101   }
3102 
3103   bool
3104   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3105                                          const AAMemoryBehavior &MemBehaviorAA,
3106                                          const AANoAlias &NoAliasAA) {
3107     // We can deduce "noalias" if the following conditions hold.
3108     // (i)   Associated value is assumed to be noalias in the definition.
3109     // (ii)  Associated value is assumed to be no-capture in all the uses
3110     //       possibly executed before this callsite.
3111     // (iii) There is no other pointer argument which could alias with the
3112     //       value.
3113 
3114     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3115     if (!AssociatedValueIsNoAliasAtDef) {
3116       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3117                         << " is not no-alias at the definition\n");
3118       return false;
3119     }
3120 
3121     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3122 
3123     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3124     const Function *ScopeFn = VIRP.getAnchorScope();
3125     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3126     // Check whether the value is captured in the scope using AANoCapture.
3127     //      Look at CFG and check only uses possibly executed before this
3128     //      callsite.
3129     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3130       Instruction *UserI = cast<Instruction>(U.getUser());
3131 
3132       // If UserI is the curr instruction and there is a single potential use of
3133       // the value in UserI we allow the use.
3134       // TODO: We should inspect the operands and allow those that cannot alias
3135       //       with the value.
3136       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3137         return true;
3138 
3139       if (ScopeFn) {
3140         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3141             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3142 
3143         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3144           return true;
3145 
3146         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3147           if (CB->isArgOperand(&U)) {
3148 
3149             unsigned ArgNo = CB->getArgOperandNo(&U);
3150 
3151             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3152                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3153                 DepClassTy::OPTIONAL);
3154 
3155             if (NoCaptureAA.isAssumedNoCapture())
3156               return true;
3157           }
3158         }
3159       }
3160 
3161       // For cases which can potentially have more users
3162       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3163           isa<SelectInst>(U)) {
3164         Follow = true;
3165         return true;
3166       }
3167 
3168       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3169       return false;
3170     };
3171 
3172     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3173       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3174         LLVM_DEBUG(
3175             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3176                    << " cannot be noalias as it is potentially captured\n");
3177         return false;
3178       }
3179     }
3180     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3181 
3182     // Check there is no other pointer argument which could alias with the
3183     // value passed at this call site.
3184     // TODO: AbstractCallSite
3185     const auto &CB = cast<CallBase>(getAnchorValue());
3186     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
3187          OtherArgNo++)
3188       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3189         return false;
3190 
3191     return true;
3192   }
3193 
3194   /// See AbstractAttribute::updateImpl(...).
3195   ChangeStatus updateImpl(Attributor &A) override {
3196     // If the argument is readnone we are done as there are no accesses via the
3197     // argument.
3198     auto &MemBehaviorAA =
3199         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3200     if (MemBehaviorAA.isAssumedReadNone()) {
3201       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3202       return ChangeStatus::UNCHANGED;
3203     }
3204 
3205     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3206     const auto &NoAliasAA =
3207         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3208 
3209     AAResults *AAR = nullptr;
3210     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3211                                                NoAliasAA)) {
3212       LLVM_DEBUG(
3213           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3214       return ChangeStatus::UNCHANGED;
3215     }
3216 
3217     return indicatePessimisticFixpoint();
3218   }
3219 
3220   /// See AbstractAttribute::trackStatistics()
3221   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3222 };
3223 
3224 /// NoAlias attribute for function return value.
3225 struct AANoAliasReturned final : AANoAliasImpl {
3226   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3227       : AANoAliasImpl(IRP, A) {}
3228 
3229   /// See AbstractAttribute::initialize(...).
3230   void initialize(Attributor &A) override {
3231     AANoAliasImpl::initialize(A);
3232     Function *F = getAssociatedFunction();
3233     if (!F || F->isDeclaration())
3234       indicatePessimisticFixpoint();
3235   }
3236 
3237   /// See AbstractAttribute::updateImpl(...).
3238   virtual ChangeStatus updateImpl(Attributor &A) override {
3239 
3240     auto CheckReturnValue = [&](Value &RV) -> bool {
3241       if (Constant *C = dyn_cast<Constant>(&RV))
3242         if (C->isNullValue() || isa<UndefValue>(C))
3243           return true;
3244 
3245       /// For now, we can only deduce noalias if we have call sites.
3246       /// FIXME: add more support.
3247       if (!isa<CallBase>(&RV))
3248         return false;
3249 
3250       const IRPosition &RVPos = IRPosition::value(RV);
3251       const auto &NoAliasAA =
3252           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3253       if (!NoAliasAA.isAssumedNoAlias())
3254         return false;
3255 
3256       const auto &NoCaptureAA =
3257           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3258       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3259     };
3260 
3261     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3262       return indicatePessimisticFixpoint();
3263 
3264     return ChangeStatus::UNCHANGED;
3265   }
3266 
3267   /// See AbstractAttribute::trackStatistics()
3268   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3269 };
3270 
3271 /// NoAlias attribute deduction for a call site return value.
3272 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3273   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3274       : AANoAliasImpl(IRP, A) {}
3275 
3276   /// See AbstractAttribute::initialize(...).
3277   void initialize(Attributor &A) override {
3278     AANoAliasImpl::initialize(A);
3279     Function *F = getAssociatedFunction();
3280     if (!F || F->isDeclaration())
3281       indicatePessimisticFixpoint();
3282   }
3283 
3284   /// See AbstractAttribute::updateImpl(...).
3285   ChangeStatus updateImpl(Attributor &A) override {
3286     // TODO: Once we have call site specific value information we can provide
3287     //       call site specific liveness information and then it makes
3288     //       sense to specialize attributes for call sites arguments instead of
3289     //       redirecting requests to the callee argument.
3290     Function *F = getAssociatedFunction();
3291     const IRPosition &FnPos = IRPosition::returned(*F);
3292     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3293     return clampStateAndIndicateChange(getState(), FnAA.getState());
3294   }
3295 
3296   /// See AbstractAttribute::trackStatistics()
3297   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3298 };
3299 
3300 /// -------------------AAIsDead Function Attribute-----------------------
3301 
3302 struct AAIsDeadValueImpl : public AAIsDead {
3303   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3304 
3305   /// See AAIsDead::isAssumedDead().
3306   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3307 
3308   /// See AAIsDead::isKnownDead().
3309   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3310 
3311   /// See AAIsDead::isAssumedDead(BasicBlock *).
3312   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3313 
3314   /// See AAIsDead::isKnownDead(BasicBlock *).
3315   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3316 
3317   /// See AAIsDead::isAssumedDead(Instruction *I).
3318   bool isAssumedDead(const Instruction *I) const override {
3319     return I == getCtxI() && isAssumedDead();
3320   }
3321 
3322   /// See AAIsDead::isKnownDead(Instruction *I).
3323   bool isKnownDead(const Instruction *I) const override {
3324     return isAssumedDead(I) && isKnownDead();
3325   }
3326 
3327   /// See AbstractAttribute::getAsStr().
3328   const std::string getAsStr() const override {
3329     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3330   }
3331 
3332   /// Check if all uses are assumed dead.
3333   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3334     // Callers might not check the type, void has no uses.
3335     if (V.getType()->isVoidTy())
3336       return true;
3337 
3338     // If we replace a value with a constant there are no uses left afterwards.
3339     if (!isa<Constant>(V)) {
3340       bool UsedAssumedInformation = false;
3341       Optional<Constant *> C =
3342           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3343       if (!C.hasValue() || *C)
3344         return true;
3345     }
3346 
3347     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3348     // Explicitly set the dependence class to required because we want a long
3349     // chain of N dependent instructions to be considered live as soon as one is
3350     // without going through N update cycles. This is not required for
3351     // correctness.
3352     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3353                              DepClassTy::REQUIRED);
3354   }
3355 
3356   /// Determine if \p I is assumed to be side-effect free.
3357   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3358     if (!I || wouldInstructionBeTriviallyDead(I))
3359       return true;
3360 
3361     auto *CB = dyn_cast<CallBase>(I);
3362     if (!CB || isa<IntrinsicInst>(CB))
3363       return false;
3364 
3365     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3366     const auto &NoUnwindAA =
3367         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3368     if (!NoUnwindAA.isAssumedNoUnwind())
3369       return false;
3370     if (!NoUnwindAA.isKnownNoUnwind())
3371       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3372 
3373     const auto &MemBehaviorAA =
3374         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3375     if (MemBehaviorAA.isAssumedReadOnly()) {
3376       if (!MemBehaviorAA.isKnownReadOnly())
3377         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3378       return true;
3379     }
3380     return false;
3381   }
3382 };
3383 
3384 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3385   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3386       : AAIsDeadValueImpl(IRP, A) {}
3387 
3388   /// See AbstractAttribute::initialize(...).
3389   void initialize(Attributor &A) override {
3390     if (isa<UndefValue>(getAssociatedValue())) {
3391       indicatePessimisticFixpoint();
3392       return;
3393     }
3394 
3395     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3396     if (!isAssumedSideEffectFree(A, I)) {
3397       if (!isa_and_nonnull<StoreInst>(I))
3398         indicatePessimisticFixpoint();
3399       else
3400         removeAssumedBits(HAS_NO_EFFECT);
3401     }
3402   }
3403 
3404   bool isDeadStore(Attributor &A, StoreInst &SI) {
3405     // Lang ref now states volatile store is not UB/dead, let's skip them.
3406     if (SI.isVolatile())
3407       return false;
3408 
3409     bool UsedAssumedInformation = false;
3410     SmallSetVector<Value *, 4> PotentialCopies;
3411     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3412                                              UsedAssumedInformation))
3413       return false;
3414     return llvm::all_of(PotentialCopies, [&](Value *V) {
3415       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3416                              UsedAssumedInformation);
3417     });
3418   }
3419 
3420   /// See AbstractAttribute::updateImpl(...).
3421   ChangeStatus updateImpl(Attributor &A) override {
3422     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3423     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3424       if (!isDeadStore(A, *SI))
3425         return indicatePessimisticFixpoint();
3426     } else {
3427       if (!isAssumedSideEffectFree(A, I))
3428         return indicatePessimisticFixpoint();
3429       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3430         return indicatePessimisticFixpoint();
3431     }
3432     return ChangeStatus::UNCHANGED;
3433   }
3434 
3435   /// See AbstractAttribute::manifest(...).
3436   ChangeStatus manifest(Attributor &A) override {
3437     Value &V = getAssociatedValue();
3438     if (auto *I = dyn_cast<Instruction>(&V)) {
3439       // If we get here we basically know the users are all dead. We check if
3440       // isAssumedSideEffectFree returns true here again because it might not be
3441       // the case and only the users are dead but the instruction (=call) is
3442       // still needed.
3443       if (isa<StoreInst>(I) ||
3444           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3445         A.deleteAfterManifest(*I);
3446         return ChangeStatus::CHANGED;
3447       }
3448     }
3449     if (V.use_empty())
3450       return ChangeStatus::UNCHANGED;
3451 
3452     bool UsedAssumedInformation = false;
3453     Optional<Constant *> C =
3454         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3455     if (C.hasValue() && C.getValue())
3456       return ChangeStatus::UNCHANGED;
3457 
3458     // Replace the value with undef as it is dead but keep droppable uses around
3459     // as they provide information we don't want to give up on just yet.
3460     UndefValue &UV = *UndefValue::get(V.getType());
3461     bool AnyChange =
3462         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3463     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3464   }
3465 
3466   /// See AbstractAttribute::trackStatistics()
3467   void trackStatistics() const override {
3468     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3469   }
3470 };
3471 
3472 struct AAIsDeadArgument : public AAIsDeadFloating {
3473   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3474       : AAIsDeadFloating(IRP, A) {}
3475 
3476   /// See AbstractAttribute::initialize(...).
3477   void initialize(Attributor &A) override {
3478     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3479       indicatePessimisticFixpoint();
3480   }
3481 
3482   /// See AbstractAttribute::manifest(...).
3483   ChangeStatus manifest(Attributor &A) override {
3484     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3485     Argument &Arg = *getAssociatedArgument();
3486     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3487       if (A.registerFunctionSignatureRewrite(
3488               Arg, /* ReplacementTypes */ {},
3489               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3490               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3491         Arg.dropDroppableUses();
3492         return ChangeStatus::CHANGED;
3493       }
3494     return Changed;
3495   }
3496 
3497   /// See AbstractAttribute::trackStatistics()
3498   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3499 };
3500 
3501 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3502   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3503       : AAIsDeadValueImpl(IRP, A) {}
3504 
3505   /// See AbstractAttribute::initialize(...).
3506   void initialize(Attributor &A) override {
3507     if (isa<UndefValue>(getAssociatedValue()))
3508       indicatePessimisticFixpoint();
3509   }
3510 
3511   /// See AbstractAttribute::updateImpl(...).
3512   ChangeStatus updateImpl(Attributor &A) override {
3513     // TODO: Once we have call site specific value information we can provide
3514     //       call site specific liveness information and then it makes
3515     //       sense to specialize attributes for call sites arguments instead of
3516     //       redirecting requests to the callee argument.
3517     Argument *Arg = getAssociatedArgument();
3518     if (!Arg)
3519       return indicatePessimisticFixpoint();
3520     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3521     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3522     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3523   }
3524 
3525   /// See AbstractAttribute::manifest(...).
3526   ChangeStatus manifest(Attributor &A) override {
3527     CallBase &CB = cast<CallBase>(getAnchorValue());
3528     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3529     assert(!isa<UndefValue>(U.get()) &&
3530            "Expected undef values to be filtered out!");
3531     UndefValue &UV = *UndefValue::get(U->getType());
3532     if (A.changeUseAfterManifest(U, UV))
3533       return ChangeStatus::CHANGED;
3534     return ChangeStatus::UNCHANGED;
3535   }
3536 
3537   /// See AbstractAttribute::trackStatistics()
3538   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3539 };
3540 
3541 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3542   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3543       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3544 
3545   /// See AAIsDead::isAssumedDead().
3546   bool isAssumedDead() const override {
3547     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3548   }
3549 
3550   /// See AbstractAttribute::initialize(...).
3551   void initialize(Attributor &A) override {
3552     if (isa<UndefValue>(getAssociatedValue())) {
3553       indicatePessimisticFixpoint();
3554       return;
3555     }
3556 
3557     // We track this separately as a secondary state.
3558     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3559   }
3560 
3561   /// See AbstractAttribute::updateImpl(...).
3562   ChangeStatus updateImpl(Attributor &A) override {
3563     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3564     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3565       IsAssumedSideEffectFree = false;
3566       Changed = ChangeStatus::CHANGED;
3567     }
3568     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3569       return indicatePessimisticFixpoint();
3570     return Changed;
3571   }
3572 
3573   /// See AbstractAttribute::trackStatistics()
3574   void trackStatistics() const override {
3575     if (IsAssumedSideEffectFree)
3576       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3577     else
3578       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3579   }
3580 
3581   /// See AbstractAttribute::getAsStr().
3582   const std::string getAsStr() const override {
3583     return isAssumedDead()
3584                ? "assumed-dead"
3585                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3586   }
3587 
3588 private:
3589   bool IsAssumedSideEffectFree;
3590 };
3591 
3592 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3593   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3594       : AAIsDeadValueImpl(IRP, A) {}
3595 
3596   /// See AbstractAttribute::updateImpl(...).
3597   ChangeStatus updateImpl(Attributor &A) override {
3598 
3599     bool UsedAssumedInformation = false;
3600     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3601                               {Instruction::Ret}, UsedAssumedInformation);
3602 
3603     auto PredForCallSite = [&](AbstractCallSite ACS) {
3604       if (ACS.isCallbackCall() || !ACS.getInstruction())
3605         return false;
3606       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3607     };
3608 
3609     bool AllCallSitesKnown;
3610     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3611                                 AllCallSitesKnown))
3612       return indicatePessimisticFixpoint();
3613 
3614     return ChangeStatus::UNCHANGED;
3615   }
3616 
3617   /// See AbstractAttribute::manifest(...).
3618   ChangeStatus manifest(Attributor &A) override {
3619     // TODO: Rewrite the signature to return void?
3620     bool AnyChange = false;
3621     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3622     auto RetInstPred = [&](Instruction &I) {
3623       ReturnInst &RI = cast<ReturnInst>(I);
3624       if (!isa<UndefValue>(RI.getReturnValue()))
3625         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3626       return true;
3627     };
3628     bool UsedAssumedInformation = false;
3629     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3630                               UsedAssumedInformation);
3631     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3632   }
3633 
3634   /// See AbstractAttribute::trackStatistics()
3635   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3636 };
3637 
3638 struct AAIsDeadFunction : public AAIsDead {
3639   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3640 
3641   /// See AbstractAttribute::initialize(...).
3642   void initialize(Attributor &A) override {
3643     const Function *F = getAnchorScope();
3644     if (F && !F->isDeclaration()) {
3645       // We only want to compute liveness once. If the function is not part of
3646       // the SCC, skip it.
3647       if (A.isRunOn(*const_cast<Function *>(F))) {
3648         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3649         assumeLive(A, F->getEntryBlock());
3650       } else {
3651         indicatePessimisticFixpoint();
3652       }
3653     }
3654   }
3655 
3656   /// See AbstractAttribute::getAsStr().
3657   const std::string getAsStr() const override {
3658     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3659            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3660            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3661            std::to_string(KnownDeadEnds.size()) + "]";
3662   }
3663 
3664   /// See AbstractAttribute::manifest(...).
3665   ChangeStatus manifest(Attributor &A) override {
3666     assert(getState().isValidState() &&
3667            "Attempted to manifest an invalid state!");
3668 
3669     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3670     Function &F = *getAnchorScope();
3671 
3672     if (AssumedLiveBlocks.empty()) {
3673       A.deleteAfterManifest(F);
3674       return ChangeStatus::CHANGED;
3675     }
3676 
3677     // Flag to determine if we can change an invoke to a call assuming the
3678     // callee is nounwind. This is not possible if the personality of the
3679     // function allows to catch asynchronous exceptions.
3680     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3681 
3682     KnownDeadEnds.set_union(ToBeExploredFrom);
3683     for (const Instruction *DeadEndI : KnownDeadEnds) {
3684       auto *CB = dyn_cast<CallBase>(DeadEndI);
3685       if (!CB)
3686         continue;
3687       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3688           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3689       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3690       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3691         continue;
3692 
3693       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3694         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3695       else
3696         A.changeToUnreachableAfterManifest(
3697             const_cast<Instruction *>(DeadEndI->getNextNode()));
3698       HasChanged = ChangeStatus::CHANGED;
3699     }
3700 
3701     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3702     for (BasicBlock &BB : F)
3703       if (!AssumedLiveBlocks.count(&BB)) {
3704         A.deleteAfterManifest(BB);
3705         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3706       }
3707 
3708     return HasChanged;
3709   }
3710 
3711   /// See AbstractAttribute::updateImpl(...).
3712   ChangeStatus updateImpl(Attributor &A) override;
3713 
3714   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3715     return !AssumedLiveEdges.count(std::make_pair(From, To));
3716   }
3717 
3718   /// See AbstractAttribute::trackStatistics()
3719   void trackStatistics() const override {}
3720 
3721   /// Returns true if the function is assumed dead.
3722   bool isAssumedDead() const override { return false; }
3723 
3724   /// See AAIsDead::isKnownDead().
3725   bool isKnownDead() const override { return false; }
3726 
3727   /// See AAIsDead::isAssumedDead(BasicBlock *).
3728   bool isAssumedDead(const BasicBlock *BB) const override {
3729     assert(BB->getParent() == getAnchorScope() &&
3730            "BB must be in the same anchor scope function.");
3731 
3732     if (!getAssumed())
3733       return false;
3734     return !AssumedLiveBlocks.count(BB);
3735   }
3736 
3737   /// See AAIsDead::isKnownDead(BasicBlock *).
3738   bool isKnownDead(const BasicBlock *BB) const override {
3739     return getKnown() && isAssumedDead(BB);
3740   }
3741 
3742   /// See AAIsDead::isAssumed(Instruction *I).
3743   bool isAssumedDead(const Instruction *I) const override {
3744     assert(I->getParent()->getParent() == getAnchorScope() &&
3745            "Instruction must be in the same anchor scope function.");
3746 
3747     if (!getAssumed())
3748       return false;
3749 
3750     // If it is not in AssumedLiveBlocks then it for sure dead.
3751     // Otherwise, it can still be after noreturn call in a live block.
3752     if (!AssumedLiveBlocks.count(I->getParent()))
3753       return true;
3754 
3755     // If it is not after a liveness barrier it is live.
3756     const Instruction *PrevI = I->getPrevNode();
3757     while (PrevI) {
3758       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3759         return true;
3760       PrevI = PrevI->getPrevNode();
3761     }
3762     return false;
3763   }
3764 
3765   /// See AAIsDead::isKnownDead(Instruction *I).
3766   bool isKnownDead(const Instruction *I) const override {
3767     return getKnown() && isAssumedDead(I);
3768   }
3769 
3770   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3771   /// that internal function called from \p BB should now be looked at.
3772   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3773     if (!AssumedLiveBlocks.insert(&BB).second)
3774       return false;
3775 
3776     // We assume that all of BB is (probably) live now and if there are calls to
3777     // internal functions we will assume that those are now live as well. This
3778     // is a performance optimization for blocks with calls to a lot of internal
3779     // functions. It can however cause dead functions to be treated as live.
3780     for (const Instruction &I : BB)
3781       if (const auto *CB = dyn_cast<CallBase>(&I))
3782         if (const Function *F = CB->getCalledFunction())
3783           if (F->hasLocalLinkage())
3784             A.markLiveInternalFunction(*F);
3785     return true;
3786   }
3787 
3788   /// Collection of instructions that need to be explored again, e.g., we
3789   /// did assume they do not transfer control to (one of their) successors.
3790   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3791 
3792   /// Collection of instructions that are known to not transfer control.
3793   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3794 
3795   /// Collection of all assumed live edges
3796   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3797 
3798   /// Collection of all assumed live BasicBlocks.
3799   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3800 };
3801 
3802 static bool
3803 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3804                         AbstractAttribute &AA,
3805                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3806   const IRPosition &IPos = IRPosition::callsite_function(CB);
3807 
3808   const auto &NoReturnAA =
3809       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3810   if (NoReturnAA.isAssumedNoReturn())
3811     return !NoReturnAA.isKnownNoReturn();
3812   if (CB.isTerminator())
3813     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3814   else
3815     AliveSuccessors.push_back(CB.getNextNode());
3816   return false;
3817 }
3818 
3819 static bool
3820 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3821                         AbstractAttribute &AA,
3822                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3823   bool UsedAssumedInformation =
3824       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3825 
3826   // First, determine if we can change an invoke to a call assuming the
3827   // callee is nounwind. This is not possible if the personality of the
3828   // function allows to catch asynchronous exceptions.
3829   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3830     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3831   } else {
3832     const IRPosition &IPos = IRPosition::callsite_function(II);
3833     const auto &AANoUnw =
3834         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3835     if (AANoUnw.isAssumedNoUnwind()) {
3836       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3837     } else {
3838       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3839     }
3840   }
3841   return UsedAssumedInformation;
3842 }
3843 
3844 static bool
3845 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3846                         AbstractAttribute &AA,
3847                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3848   bool UsedAssumedInformation = false;
3849   if (BI.getNumSuccessors() == 1) {
3850     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3851   } else {
3852     Optional<Constant *> C =
3853         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3854     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3855       // No value yet, assume both edges are dead.
3856     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3857       const BasicBlock *SuccBB =
3858           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3859       AliveSuccessors.push_back(&SuccBB->front());
3860     } else {
3861       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3862       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3863       UsedAssumedInformation = false;
3864     }
3865   }
3866   return UsedAssumedInformation;
3867 }
3868 
3869 static bool
3870 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3871                         AbstractAttribute &AA,
3872                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3873   bool UsedAssumedInformation = false;
3874   Optional<Constant *> C =
3875       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3876   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3877     // No value yet, assume all edges are dead.
3878   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3879     for (auto &CaseIt : SI.cases()) {
3880       if (CaseIt.getCaseValue() == C.getValue()) {
3881         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3882         return UsedAssumedInformation;
3883       }
3884     }
3885     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3886     return UsedAssumedInformation;
3887   } else {
3888     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3889       AliveSuccessors.push_back(&SuccBB->front());
3890   }
3891   return UsedAssumedInformation;
3892 }
3893 
3894 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3895   ChangeStatus Change = ChangeStatus::UNCHANGED;
3896 
3897   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3898                     << getAnchorScope()->size() << "] BBs and "
3899                     << ToBeExploredFrom.size() << " exploration points and "
3900                     << KnownDeadEnds.size() << " known dead ends\n");
3901 
3902   // Copy and clear the list of instructions we need to explore from. It is
3903   // refilled with instructions the next update has to look at.
3904   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3905                                                ToBeExploredFrom.end());
3906   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3907 
3908   SmallVector<const Instruction *, 8> AliveSuccessors;
3909   while (!Worklist.empty()) {
3910     const Instruction *I = Worklist.pop_back_val();
3911     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3912 
3913     // Fast forward for uninteresting instructions. We could look for UB here
3914     // though.
3915     while (!I->isTerminator() && !isa<CallBase>(I))
3916       I = I->getNextNode();
3917 
3918     AliveSuccessors.clear();
3919 
3920     bool UsedAssumedInformation = false;
3921     switch (I->getOpcode()) {
3922     // TODO: look for (assumed) UB to backwards propagate "deadness".
3923     default:
3924       assert(I->isTerminator() &&
3925              "Expected non-terminators to be handled already!");
3926       for (const BasicBlock *SuccBB : successors(I->getParent()))
3927         AliveSuccessors.push_back(&SuccBB->front());
3928       break;
3929     case Instruction::Call:
3930       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3931                                                        *this, AliveSuccessors);
3932       break;
3933     case Instruction::Invoke:
3934       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3935                                                        *this, AliveSuccessors);
3936       break;
3937     case Instruction::Br:
3938       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3939                                                        *this, AliveSuccessors);
3940       break;
3941     case Instruction::Switch:
3942       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3943                                                        *this, AliveSuccessors);
3944       break;
3945     }
3946 
3947     if (UsedAssumedInformation) {
3948       NewToBeExploredFrom.insert(I);
3949     } else if (AliveSuccessors.empty() ||
3950                (I->isTerminator() &&
3951                 AliveSuccessors.size() < I->getNumSuccessors())) {
3952       if (KnownDeadEnds.insert(I))
3953         Change = ChangeStatus::CHANGED;
3954     }
3955 
3956     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3957                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3958                       << UsedAssumedInformation << "\n");
3959 
3960     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3961       if (!I->isTerminator()) {
3962         assert(AliveSuccessors.size() == 1 &&
3963                "Non-terminator expected to have a single successor!");
3964         Worklist.push_back(AliveSuccessor);
3965       } else {
3966         // record the assumed live edge
3967         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3968         if (AssumedLiveEdges.insert(Edge).second)
3969           Change = ChangeStatus::CHANGED;
3970         if (assumeLive(A, *AliveSuccessor->getParent()))
3971           Worklist.push_back(AliveSuccessor);
3972       }
3973     }
3974   }
3975 
3976   // Check if the content of ToBeExploredFrom changed, ignore the order.
3977   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3978       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3979         return !ToBeExploredFrom.count(I);
3980       })) {
3981     Change = ChangeStatus::CHANGED;
3982     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3983   }
3984 
3985   // If we know everything is live there is no need to query for liveness.
3986   // Instead, indicating a pessimistic fixpoint will cause the state to be
3987   // "invalid" and all queries to be answered conservatively without lookups.
3988   // To be in this state we have to (1) finished the exploration and (3) not
3989   // discovered any non-trivial dead end and (2) not ruled unreachable code
3990   // dead.
3991   if (ToBeExploredFrom.empty() &&
3992       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3993       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3994         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3995       }))
3996     return indicatePessimisticFixpoint();
3997   return Change;
3998 }
3999 
4000 /// Liveness information for a call sites.
4001 struct AAIsDeadCallSite final : AAIsDeadFunction {
4002   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4003       : AAIsDeadFunction(IRP, A) {}
4004 
4005   /// See AbstractAttribute::initialize(...).
4006   void initialize(Attributor &A) override {
4007     // TODO: Once we have call site specific value information we can provide
4008     //       call site specific liveness information and then it makes
4009     //       sense to specialize attributes for call sites instead of
4010     //       redirecting requests to the callee.
4011     llvm_unreachable("Abstract attributes for liveness are not "
4012                      "supported for call sites yet!");
4013   }
4014 
4015   /// See AbstractAttribute::updateImpl(...).
4016   ChangeStatus updateImpl(Attributor &A) override {
4017     return indicatePessimisticFixpoint();
4018   }
4019 
4020   /// See AbstractAttribute::trackStatistics()
4021   void trackStatistics() const override {}
4022 };
4023 
4024 /// -------------------- Dereferenceable Argument Attribute --------------------
4025 
4026 struct AADereferenceableImpl : AADereferenceable {
4027   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4028       : AADereferenceable(IRP, A) {}
4029   using StateType = DerefState;
4030 
4031   /// See AbstractAttribute::initialize(...).
4032   void initialize(Attributor &A) override {
4033     SmallVector<Attribute, 4> Attrs;
4034     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4035              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4036     for (const Attribute &Attr : Attrs)
4037       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4038 
4039     const IRPosition &IRP = this->getIRPosition();
4040     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4041 
4042     bool CanBeNull, CanBeFreed;
4043     takeKnownDerefBytesMaximum(
4044         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4045             A.getDataLayout(), CanBeNull, CanBeFreed));
4046 
4047     bool IsFnInterface = IRP.isFnInterfaceKind();
4048     Function *FnScope = IRP.getAnchorScope();
4049     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4050       indicatePessimisticFixpoint();
4051       return;
4052     }
4053 
4054     if (Instruction *CtxI = getCtxI())
4055       followUsesInMBEC(*this, A, getState(), *CtxI);
4056   }
4057 
4058   /// See AbstractAttribute::getState()
4059   /// {
4060   StateType &getState() override { return *this; }
4061   const StateType &getState() const override { return *this; }
4062   /// }
4063 
4064   /// Helper function for collecting accessed bytes in must-be-executed-context
4065   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4066                               DerefState &State) {
4067     const Value *UseV = U->get();
4068     if (!UseV->getType()->isPointerTy())
4069       return;
4070 
4071     Type *PtrTy = UseV->getType();
4072     const DataLayout &DL = A.getDataLayout();
4073     int64_t Offset;
4074     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4075             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4076       if (Base == &getAssociatedValue() &&
4077           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4078         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4079         State.addAccessedBytes(Offset, Size);
4080       }
4081     }
4082   }
4083 
4084   /// See followUsesInMBEC
4085   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4086                        AADereferenceable::StateType &State) {
4087     bool IsNonNull = false;
4088     bool TrackUse = false;
4089     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4090         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4091     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4092                       << " for instruction " << *I << "\n");
4093 
4094     addAccessedBytesForUse(A, U, I, State);
4095     State.takeKnownDerefBytesMaximum(DerefBytes);
4096     return TrackUse;
4097   }
4098 
4099   /// See AbstractAttribute::manifest(...).
4100   ChangeStatus manifest(Attributor &A) override {
4101     ChangeStatus Change = AADereferenceable::manifest(A);
4102     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4103       removeAttrs({Attribute::DereferenceableOrNull});
4104       return ChangeStatus::CHANGED;
4105     }
4106     return Change;
4107   }
4108 
4109   void getDeducedAttributes(LLVMContext &Ctx,
4110                             SmallVectorImpl<Attribute> &Attrs) const override {
4111     // TODO: Add *_globally support
4112     if (isAssumedNonNull())
4113       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4114           Ctx, getAssumedDereferenceableBytes()));
4115     else
4116       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4117           Ctx, getAssumedDereferenceableBytes()));
4118   }
4119 
4120   /// See AbstractAttribute::getAsStr().
4121   const std::string getAsStr() const override {
4122     if (!getAssumedDereferenceableBytes())
4123       return "unknown-dereferenceable";
4124     return std::string("dereferenceable") +
4125            (isAssumedNonNull() ? "" : "_or_null") +
4126            (isAssumedGlobal() ? "_globally" : "") + "<" +
4127            std::to_string(getKnownDereferenceableBytes()) + "-" +
4128            std::to_string(getAssumedDereferenceableBytes()) + ">";
4129   }
4130 };
4131 
4132 /// Dereferenceable attribute for a floating value.
4133 struct AADereferenceableFloating : AADereferenceableImpl {
4134   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4135       : AADereferenceableImpl(IRP, A) {}
4136 
4137   /// See AbstractAttribute::updateImpl(...).
4138   ChangeStatus updateImpl(Attributor &A) override {
4139     const DataLayout &DL = A.getDataLayout();
4140 
4141     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4142                             bool Stripped) -> bool {
4143       unsigned IdxWidth =
4144           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4145       APInt Offset(IdxWidth, 0);
4146       const Value *Base =
4147           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4148 
4149       const auto &AA = A.getAAFor<AADereferenceable>(
4150           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4151       int64_t DerefBytes = 0;
4152       if (!Stripped && this == &AA) {
4153         // Use IR information if we did not strip anything.
4154         // TODO: track globally.
4155         bool CanBeNull, CanBeFreed;
4156         DerefBytes =
4157             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4158         T.GlobalState.indicatePessimisticFixpoint();
4159       } else {
4160         const DerefState &DS = AA.getState();
4161         DerefBytes = DS.DerefBytesState.getAssumed();
4162         T.GlobalState &= DS.GlobalState;
4163       }
4164 
4165       // For now we do not try to "increase" dereferenceability due to negative
4166       // indices as we first have to come up with code to deal with loops and
4167       // for overflows of the dereferenceable bytes.
4168       int64_t OffsetSExt = Offset.getSExtValue();
4169       if (OffsetSExt < 0)
4170         OffsetSExt = 0;
4171 
4172       T.takeAssumedDerefBytesMinimum(
4173           std::max(int64_t(0), DerefBytes - OffsetSExt));
4174 
4175       if (this == &AA) {
4176         if (!Stripped) {
4177           // If nothing was stripped IR information is all we got.
4178           T.takeKnownDerefBytesMaximum(
4179               std::max(int64_t(0), DerefBytes - OffsetSExt));
4180           T.indicatePessimisticFixpoint();
4181         } else if (OffsetSExt > 0) {
4182           // If something was stripped but there is circular reasoning we look
4183           // for the offset. If it is positive we basically decrease the
4184           // dereferenceable bytes in a circluar loop now, which will simply
4185           // drive them down to the known value in a very slow way which we
4186           // can accelerate.
4187           T.indicatePessimisticFixpoint();
4188         }
4189       }
4190 
4191       return T.isValidState();
4192     };
4193 
4194     DerefState T;
4195     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4196                                            VisitValueCB, getCtxI()))
4197       return indicatePessimisticFixpoint();
4198 
4199     return clampStateAndIndicateChange(getState(), T);
4200   }
4201 
4202   /// See AbstractAttribute::trackStatistics()
4203   void trackStatistics() const override {
4204     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4205   }
4206 };
4207 
4208 /// Dereferenceable attribute for a return value.
4209 struct AADereferenceableReturned final
4210     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4211   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4212       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4213             IRP, A) {}
4214 
4215   /// See AbstractAttribute::trackStatistics()
4216   void trackStatistics() const override {
4217     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4218   }
4219 };
4220 
4221 /// Dereferenceable attribute for an argument
4222 struct AADereferenceableArgument final
4223     : AAArgumentFromCallSiteArguments<AADereferenceable,
4224                                       AADereferenceableImpl> {
4225   using Base =
4226       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4227   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4228       : Base(IRP, A) {}
4229 
4230   /// See AbstractAttribute::trackStatistics()
4231   void trackStatistics() const override {
4232     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4233   }
4234 };
4235 
4236 /// Dereferenceable attribute for a call site argument.
4237 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4238   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4239       : AADereferenceableFloating(IRP, A) {}
4240 
4241   /// See AbstractAttribute::trackStatistics()
4242   void trackStatistics() const override {
4243     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4244   }
4245 };
4246 
4247 /// Dereferenceable attribute deduction for a call site return value.
4248 struct AADereferenceableCallSiteReturned final
4249     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4250   using Base =
4251       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4252   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4253       : Base(IRP, A) {}
4254 
4255   /// See AbstractAttribute::trackStatistics()
4256   void trackStatistics() const override {
4257     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4258   }
4259 };
4260 
4261 // ------------------------ Align Argument Attribute ------------------------
4262 
4263 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4264                                     Value &AssociatedValue, const Use *U,
4265                                     const Instruction *I, bool &TrackUse) {
4266   // We need to follow common pointer manipulation uses to the accesses they
4267   // feed into.
4268   if (isa<CastInst>(I)) {
4269     // Follow all but ptr2int casts.
4270     TrackUse = !isa<PtrToIntInst>(I);
4271     return 0;
4272   }
4273   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4274     if (GEP->hasAllConstantIndices())
4275       TrackUse = true;
4276     return 0;
4277   }
4278 
4279   MaybeAlign MA;
4280   if (const auto *CB = dyn_cast<CallBase>(I)) {
4281     if (CB->isBundleOperand(U) || CB->isCallee(U))
4282       return 0;
4283 
4284     unsigned ArgNo = CB->getArgOperandNo(U);
4285     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4286     // As long as we only use known information there is no need to track
4287     // dependences here.
4288     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4289     MA = MaybeAlign(AlignAA.getKnownAlign());
4290   }
4291 
4292   const DataLayout &DL = A.getDataLayout();
4293   const Value *UseV = U->get();
4294   if (auto *SI = dyn_cast<StoreInst>(I)) {
4295     if (SI->getPointerOperand() == UseV)
4296       MA = SI->getAlign();
4297   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4298     if (LI->getPointerOperand() == UseV)
4299       MA = LI->getAlign();
4300   }
4301 
4302   if (!MA || *MA <= QueryingAA.getKnownAlign())
4303     return 0;
4304 
4305   unsigned Alignment = MA->value();
4306   int64_t Offset;
4307 
4308   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4309     if (Base == &AssociatedValue) {
4310       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4311       // So we can say that the maximum power of two which is a divisor of
4312       // gcd(Offset, Alignment) is an alignment.
4313 
4314       uint32_t gcd =
4315           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4316       Alignment = llvm::PowerOf2Floor(gcd);
4317     }
4318   }
4319 
4320   return Alignment;
4321 }
4322 
4323 struct AAAlignImpl : AAAlign {
4324   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4325 
4326   /// See AbstractAttribute::initialize(...).
4327   void initialize(Attributor &A) override {
4328     SmallVector<Attribute, 4> Attrs;
4329     getAttrs({Attribute::Alignment}, Attrs);
4330     for (const Attribute &Attr : Attrs)
4331       takeKnownMaximum(Attr.getValueAsInt());
4332 
4333     Value &V = getAssociatedValue();
4334     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4335     //       use of the function pointer. This was caused by D73131. We want to
4336     //       avoid this for function pointers especially because we iterate
4337     //       their uses and int2ptr is not handled. It is not a correctness
4338     //       problem though!
4339     if (!V.getType()->getPointerElementType()->isFunctionTy())
4340       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4341 
4342     if (getIRPosition().isFnInterfaceKind() &&
4343         (!getAnchorScope() ||
4344          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4345       indicatePessimisticFixpoint();
4346       return;
4347     }
4348 
4349     if (Instruction *CtxI = getCtxI())
4350       followUsesInMBEC(*this, A, getState(), *CtxI);
4351   }
4352 
4353   /// See AbstractAttribute::manifest(...).
4354   ChangeStatus manifest(Attributor &A) override {
4355     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4356 
4357     // Check for users that allow alignment annotations.
4358     Value &AssociatedValue = getAssociatedValue();
4359     for (const Use &U : AssociatedValue.uses()) {
4360       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4361         if (SI->getPointerOperand() == &AssociatedValue)
4362           if (SI->getAlignment() < getAssumedAlign()) {
4363             STATS_DECLTRACK(AAAlign, Store,
4364                             "Number of times alignment added to a store");
4365             SI->setAlignment(Align(getAssumedAlign()));
4366             LoadStoreChanged = ChangeStatus::CHANGED;
4367           }
4368       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4369         if (LI->getPointerOperand() == &AssociatedValue)
4370           if (LI->getAlignment() < getAssumedAlign()) {
4371             LI->setAlignment(Align(getAssumedAlign()));
4372             STATS_DECLTRACK(AAAlign, Load,
4373                             "Number of times alignment added to a load");
4374             LoadStoreChanged = ChangeStatus::CHANGED;
4375           }
4376       }
4377     }
4378 
4379     ChangeStatus Changed = AAAlign::manifest(A);
4380 
4381     Align InheritAlign =
4382         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4383     if (InheritAlign >= getAssumedAlign())
4384       return LoadStoreChanged;
4385     return Changed | LoadStoreChanged;
4386   }
4387 
4388   // TODO: Provide a helper to determine the implied ABI alignment and check in
4389   //       the existing manifest method and a new one for AAAlignImpl that value
4390   //       to avoid making the alignment explicit if it did not improve.
4391 
4392   /// See AbstractAttribute::getDeducedAttributes
4393   virtual void
4394   getDeducedAttributes(LLVMContext &Ctx,
4395                        SmallVectorImpl<Attribute> &Attrs) const override {
4396     if (getAssumedAlign() > 1)
4397       Attrs.emplace_back(
4398           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4399   }
4400 
4401   /// See followUsesInMBEC
4402   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4403                        AAAlign::StateType &State) {
4404     bool TrackUse = false;
4405 
4406     unsigned int KnownAlign =
4407         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4408     State.takeKnownMaximum(KnownAlign);
4409 
4410     return TrackUse;
4411   }
4412 
4413   /// See AbstractAttribute::getAsStr().
4414   const std::string getAsStr() const override {
4415     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4416                                 "-" + std::to_string(getAssumedAlign()) + ">")
4417                              : "unknown-align";
4418   }
4419 };
4420 
4421 /// Align attribute for a floating value.
4422 struct AAAlignFloating : AAAlignImpl {
4423   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4424 
4425   /// See AbstractAttribute::updateImpl(...).
4426   ChangeStatus updateImpl(Attributor &A) override {
4427     const DataLayout &DL = A.getDataLayout();
4428 
4429     auto VisitValueCB = [&](Value &V, const Instruction *,
4430                             AAAlign::StateType &T, bool Stripped) -> bool {
4431       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4432                                            DepClassTy::REQUIRED);
4433       if (!Stripped && this == &AA) {
4434         int64_t Offset;
4435         unsigned Alignment = 1;
4436         if (const Value *Base =
4437                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4438           Align PA = Base->getPointerAlignment(DL);
4439           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4440           // So we can say that the maximum power of two which is a divisor of
4441           // gcd(Offset, Alignment) is an alignment.
4442 
4443           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4444                                                uint32_t(PA.value()));
4445           Alignment = llvm::PowerOf2Floor(gcd);
4446         } else {
4447           Alignment = V.getPointerAlignment(DL).value();
4448         }
4449         // Use only IR information if we did not strip anything.
4450         T.takeKnownMaximum(Alignment);
4451         T.indicatePessimisticFixpoint();
4452       } else {
4453         // Use abstract attribute information.
4454         const AAAlign::StateType &DS = AA.getState();
4455         T ^= DS;
4456       }
4457       return T.isValidState();
4458     };
4459 
4460     StateType T;
4461     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4462                                           VisitValueCB, getCtxI()))
4463       return indicatePessimisticFixpoint();
4464 
4465     // TODO: If we know we visited all incoming values, thus no are assumed
4466     // dead, we can take the known information from the state T.
4467     return clampStateAndIndicateChange(getState(), T);
4468   }
4469 
4470   /// See AbstractAttribute::trackStatistics()
4471   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4472 };
4473 
4474 /// Align attribute for function return value.
4475 struct AAAlignReturned final
4476     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4477   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4478   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4479 
4480   /// See AbstractAttribute::initialize(...).
4481   void initialize(Attributor &A) override {
4482     Base::initialize(A);
4483     Function *F = getAssociatedFunction();
4484     if (!F || F->isDeclaration())
4485       indicatePessimisticFixpoint();
4486   }
4487 
4488   /// See AbstractAttribute::trackStatistics()
4489   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4490 };
4491 
4492 /// Align attribute for function argument.
4493 struct AAAlignArgument final
4494     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4495   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4496   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4497 
4498   /// See AbstractAttribute::manifest(...).
4499   ChangeStatus manifest(Attributor &A) override {
4500     // If the associated argument is involved in a must-tail call we give up
4501     // because we would need to keep the argument alignments of caller and
4502     // callee in-sync. Just does not seem worth the trouble right now.
4503     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4504       return ChangeStatus::UNCHANGED;
4505     return Base::manifest(A);
4506   }
4507 
4508   /// See AbstractAttribute::trackStatistics()
4509   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4510 };
4511 
4512 struct AAAlignCallSiteArgument final : AAAlignFloating {
4513   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4514       : AAAlignFloating(IRP, A) {}
4515 
4516   /// See AbstractAttribute::manifest(...).
4517   ChangeStatus manifest(Attributor &A) override {
4518     // If the associated argument is involved in a must-tail call we give up
4519     // because we would need to keep the argument alignments of caller and
4520     // callee in-sync. Just does not seem worth the trouble right now.
4521     if (Argument *Arg = getAssociatedArgument())
4522       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4523         return ChangeStatus::UNCHANGED;
4524     ChangeStatus Changed = AAAlignImpl::manifest(A);
4525     Align InheritAlign =
4526         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4527     if (InheritAlign >= getAssumedAlign())
4528       Changed = ChangeStatus::UNCHANGED;
4529     return Changed;
4530   }
4531 
4532   /// See AbstractAttribute::updateImpl(Attributor &A).
4533   ChangeStatus updateImpl(Attributor &A) override {
4534     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4535     if (Argument *Arg = getAssociatedArgument()) {
4536       // We only take known information from the argument
4537       // so we do not need to track a dependence.
4538       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4539           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4540       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4541     }
4542     return Changed;
4543   }
4544 
4545   /// See AbstractAttribute::trackStatistics()
4546   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4547 };
4548 
4549 /// Align attribute deduction for a call site return value.
4550 struct AAAlignCallSiteReturned final
4551     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4552   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4553   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4554       : Base(IRP, A) {}
4555 
4556   /// See AbstractAttribute::initialize(...).
4557   void initialize(Attributor &A) override {
4558     Base::initialize(A);
4559     Function *F = getAssociatedFunction();
4560     if (!F || F->isDeclaration())
4561       indicatePessimisticFixpoint();
4562   }
4563 
4564   /// See AbstractAttribute::trackStatistics()
4565   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4566 };
4567 
4568 /// ------------------ Function No-Return Attribute ----------------------------
4569 struct AANoReturnImpl : public AANoReturn {
4570   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4571 
4572   /// See AbstractAttribute::initialize(...).
4573   void initialize(Attributor &A) override {
4574     AANoReturn::initialize(A);
4575     Function *F = getAssociatedFunction();
4576     if (!F || F->isDeclaration())
4577       indicatePessimisticFixpoint();
4578   }
4579 
4580   /// See AbstractAttribute::getAsStr().
4581   const std::string getAsStr() const override {
4582     return getAssumed() ? "noreturn" : "may-return";
4583   }
4584 
4585   /// See AbstractAttribute::updateImpl(Attributor &A).
4586   virtual ChangeStatus updateImpl(Attributor &A) override {
4587     auto CheckForNoReturn = [](Instruction &) { return false; };
4588     bool UsedAssumedInformation = false;
4589     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4590                                    {(unsigned)Instruction::Ret},
4591                                    UsedAssumedInformation))
4592       return indicatePessimisticFixpoint();
4593     return ChangeStatus::UNCHANGED;
4594   }
4595 };
4596 
4597 struct AANoReturnFunction final : AANoReturnImpl {
4598   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4599       : AANoReturnImpl(IRP, A) {}
4600 
4601   /// See AbstractAttribute::trackStatistics()
4602   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4603 };
4604 
4605 /// NoReturn attribute deduction for a call sites.
4606 struct AANoReturnCallSite final : AANoReturnImpl {
4607   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4608       : AANoReturnImpl(IRP, A) {}
4609 
4610   /// See AbstractAttribute::initialize(...).
4611   void initialize(Attributor &A) override {
4612     AANoReturnImpl::initialize(A);
4613     if (Function *F = getAssociatedFunction()) {
4614       const IRPosition &FnPos = IRPosition::function(*F);
4615       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4616       if (!FnAA.isAssumedNoReturn())
4617         indicatePessimisticFixpoint();
4618     }
4619   }
4620 
4621   /// See AbstractAttribute::updateImpl(...).
4622   ChangeStatus updateImpl(Attributor &A) override {
4623     // TODO: Once we have call site specific value information we can provide
4624     //       call site specific liveness information and then it makes
4625     //       sense to specialize attributes for call sites arguments instead of
4626     //       redirecting requests to the callee argument.
4627     Function *F = getAssociatedFunction();
4628     const IRPosition &FnPos = IRPosition::function(*F);
4629     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4630     return clampStateAndIndicateChange(getState(), FnAA.getState());
4631   }
4632 
4633   /// See AbstractAttribute::trackStatistics()
4634   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4635 };
4636 
4637 /// ----------------------- Variable Capturing ---------------------------------
4638 
4639 /// A class to hold the state of for no-capture attributes.
4640 struct AANoCaptureImpl : public AANoCapture {
4641   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4642 
4643   /// See AbstractAttribute::initialize(...).
4644   void initialize(Attributor &A) override {
4645     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4646       indicateOptimisticFixpoint();
4647       return;
4648     }
4649     Function *AnchorScope = getAnchorScope();
4650     if (isFnInterfaceKind() &&
4651         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4652       indicatePessimisticFixpoint();
4653       return;
4654     }
4655 
4656     // You cannot "capture" null in the default address space.
4657     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4658         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4659       indicateOptimisticFixpoint();
4660       return;
4661     }
4662 
4663     const Function *F =
4664         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4665 
4666     // Check what state the associated function can actually capture.
4667     if (F)
4668       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4669     else
4670       indicatePessimisticFixpoint();
4671   }
4672 
4673   /// See AbstractAttribute::updateImpl(...).
4674   ChangeStatus updateImpl(Attributor &A) override;
4675 
4676   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4677   virtual void
4678   getDeducedAttributes(LLVMContext &Ctx,
4679                        SmallVectorImpl<Attribute> &Attrs) const override {
4680     if (!isAssumedNoCaptureMaybeReturned())
4681       return;
4682 
4683     if (isArgumentPosition()) {
4684       if (isAssumedNoCapture())
4685         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4686       else if (ManifestInternal)
4687         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4688     }
4689   }
4690 
4691   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4692   /// depending on the ability of the function associated with \p IRP to capture
4693   /// state in memory and through "returning/throwing", respectively.
4694   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4695                                                    const Function &F,
4696                                                    BitIntegerState &State) {
4697     // TODO: Once we have memory behavior attributes we should use them here.
4698 
4699     // If we know we cannot communicate or write to memory, we do not care about
4700     // ptr2int anymore.
4701     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4702         F.getReturnType()->isVoidTy()) {
4703       State.addKnownBits(NO_CAPTURE);
4704       return;
4705     }
4706 
4707     // A function cannot capture state in memory if it only reads memory, it can
4708     // however return/throw state and the state might be influenced by the
4709     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4710     if (F.onlyReadsMemory())
4711       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4712 
4713     // A function cannot communicate state back if it does not through
4714     // exceptions and doesn not return values.
4715     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4716       State.addKnownBits(NOT_CAPTURED_IN_RET);
4717 
4718     // Check existing "returned" attributes.
4719     int ArgNo = IRP.getCalleeArgNo();
4720     if (F.doesNotThrow() && ArgNo >= 0) {
4721       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4722         if (F.hasParamAttribute(u, Attribute::Returned)) {
4723           if (u == unsigned(ArgNo))
4724             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4725           else if (F.onlyReadsMemory())
4726             State.addKnownBits(NO_CAPTURE);
4727           else
4728             State.addKnownBits(NOT_CAPTURED_IN_RET);
4729           break;
4730         }
4731     }
4732   }
4733 
4734   /// See AbstractState::getAsStr().
4735   const std::string getAsStr() const override {
4736     if (isKnownNoCapture())
4737       return "known not-captured";
4738     if (isAssumedNoCapture())
4739       return "assumed not-captured";
4740     if (isKnownNoCaptureMaybeReturned())
4741       return "known not-captured-maybe-returned";
4742     if (isAssumedNoCaptureMaybeReturned())
4743       return "assumed not-captured-maybe-returned";
4744     return "assumed-captured";
4745   }
4746 };
4747 
4748 /// Attributor-aware capture tracker.
4749 struct AACaptureUseTracker final : public CaptureTracker {
4750 
4751   /// Create a capture tracker that can lookup in-flight abstract attributes
4752   /// through the Attributor \p A.
4753   ///
4754   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4755   /// search is stopped. If a use leads to a return instruction,
4756   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4757   /// If a use leads to a ptr2int which may capture the value,
4758   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4759   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4760   /// set. All values in \p PotentialCopies are later tracked as well. For every
4761   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4762   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4763   /// conservatively set to true.
4764   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4765                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4766                       SmallSetVector<Value *, 4> &PotentialCopies,
4767                       unsigned &RemainingUsesToExplore)
4768       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4769         PotentialCopies(PotentialCopies),
4770         RemainingUsesToExplore(RemainingUsesToExplore) {}
4771 
4772   /// Determine if \p V maybe captured. *Also updates the state!*
4773   bool valueMayBeCaptured(const Value *V) {
4774     if (V->getType()->isPointerTy()) {
4775       PointerMayBeCaptured(V, this);
4776     } else {
4777       State.indicatePessimisticFixpoint();
4778     }
4779     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4780   }
4781 
4782   /// See CaptureTracker::tooManyUses().
4783   void tooManyUses() override {
4784     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4785   }
4786 
4787   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4788     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4789       return true;
4790     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4791         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4792     return DerefAA.getAssumedDereferenceableBytes();
4793   }
4794 
4795   /// See CaptureTracker::captured(...).
4796   bool captured(const Use *U) override {
4797     Instruction *UInst = cast<Instruction>(U->getUser());
4798     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4799                       << "\n");
4800 
4801     // Because we may reuse the tracker multiple times we keep track of the
4802     // number of explored uses ourselves as well.
4803     if (RemainingUsesToExplore-- == 0) {
4804       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4805       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4806                           /* Return */ true);
4807     }
4808 
4809     // Deal with ptr2int by following uses.
4810     if (isa<PtrToIntInst>(UInst)) {
4811       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4812       return valueMayBeCaptured(UInst);
4813     }
4814 
4815     // For stores we check if we can follow the value through memory or not.
4816     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4817       if (SI->isVolatile())
4818         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4819                             /* Return */ false);
4820       bool UsedAssumedInformation = false;
4821       if (!AA::getPotentialCopiesOfStoredValue(
4822               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4823         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4824                             /* Return */ false);
4825       // Not captured directly, potential copies will be checked.
4826       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4827                           /* Return */ false);
4828     }
4829 
4830     // Explicitly catch return instructions.
4831     if (isa<ReturnInst>(UInst)) {
4832       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4833         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4834                             /* Return */ true);
4835       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4836                           /* Return */ true);
4837     }
4838 
4839     // For now we only use special logic for call sites. However, the tracker
4840     // itself knows about a lot of other non-capturing cases already.
4841     auto *CB = dyn_cast<CallBase>(UInst);
4842     if (!CB || !CB->isArgOperand(U))
4843       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4844                           /* Return */ true);
4845 
4846     unsigned ArgNo = CB->getArgOperandNo(U);
4847     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4848     // If we have a abstract no-capture attribute for the argument we can use
4849     // it to justify a non-capture attribute here. This allows recursion!
4850     auto &ArgNoCaptureAA =
4851         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4852     if (ArgNoCaptureAA.isAssumedNoCapture())
4853       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4854                           /* Return */ false);
4855     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4856       addPotentialCopy(*CB);
4857       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4858                           /* Return */ false);
4859     }
4860 
4861     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4862     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4863                         /* Return */ true);
4864   }
4865 
4866   /// Register \p CS as potential copy of the value we are checking.
4867   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4868 
4869   /// See CaptureTracker::shouldExplore(...).
4870   bool shouldExplore(const Use *U) override {
4871     // Check liveness and ignore droppable users.
4872     bool UsedAssumedInformation = false;
4873     return !U->getUser()->isDroppable() &&
4874            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4875                             UsedAssumedInformation);
4876   }
4877 
4878   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4879   /// \p CapturedInRet, then return the appropriate value for use in the
4880   /// CaptureTracker::captured() interface.
4881   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4882                     bool CapturedInRet) {
4883     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4884                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4885     if (CapturedInMem)
4886       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4887     if (CapturedInInt)
4888       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4889     if (CapturedInRet)
4890       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4891     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4892   }
4893 
4894 private:
4895   /// The attributor providing in-flight abstract attributes.
4896   Attributor &A;
4897 
4898   /// The abstract attribute currently updated.
4899   AANoCapture &NoCaptureAA;
4900 
4901   /// The abstract liveness state.
4902   const AAIsDead &IsDeadAA;
4903 
4904   /// The state currently updated.
4905   AANoCapture::StateType &State;
4906 
4907   /// Set of potential copies of the tracked value.
4908   SmallSetVector<Value *, 4> &PotentialCopies;
4909 
4910   /// Global counter to limit the number of explored uses.
4911   unsigned &RemainingUsesToExplore;
4912 };
4913 
4914 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4915   const IRPosition &IRP = getIRPosition();
4916   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4917                                   : &IRP.getAssociatedValue();
4918   if (!V)
4919     return indicatePessimisticFixpoint();
4920 
4921   const Function *F =
4922       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4923   assert(F && "Expected a function!");
4924   const IRPosition &FnPos = IRPosition::function(*F);
4925   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4926 
4927   AANoCapture::StateType T;
4928 
4929   // Readonly means we cannot capture through memory.
4930   const auto &FnMemAA =
4931       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4932   if (FnMemAA.isAssumedReadOnly()) {
4933     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4934     if (FnMemAA.isKnownReadOnly())
4935       addKnownBits(NOT_CAPTURED_IN_MEM);
4936     else
4937       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4938   }
4939 
4940   // Make sure all returned values are different than the underlying value.
4941   // TODO: we could do this in a more sophisticated way inside
4942   //       AAReturnedValues, e.g., track all values that escape through returns
4943   //       directly somehow.
4944   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4945     bool SeenConstant = false;
4946     for (auto &It : RVAA.returned_values()) {
4947       if (isa<Constant>(It.first)) {
4948         if (SeenConstant)
4949           return false;
4950         SeenConstant = true;
4951       } else if (!isa<Argument>(It.first) ||
4952                  It.first == getAssociatedArgument())
4953         return false;
4954     }
4955     return true;
4956   };
4957 
4958   const auto &NoUnwindAA =
4959       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4960   if (NoUnwindAA.isAssumedNoUnwind()) {
4961     bool IsVoidTy = F->getReturnType()->isVoidTy();
4962     const AAReturnedValues *RVAA =
4963         IsVoidTy ? nullptr
4964                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4965 
4966                                                  DepClassTy::OPTIONAL);
4967     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4968       T.addKnownBits(NOT_CAPTURED_IN_RET);
4969       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4970         return ChangeStatus::UNCHANGED;
4971       if (NoUnwindAA.isKnownNoUnwind() &&
4972           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4973         addKnownBits(NOT_CAPTURED_IN_RET);
4974         if (isKnown(NOT_CAPTURED_IN_MEM))
4975           return indicateOptimisticFixpoint();
4976       }
4977     }
4978   }
4979 
4980   // Use the CaptureTracker interface and logic with the specialized tracker,
4981   // defined in AACaptureUseTracker, that can look at in-flight abstract
4982   // attributes and directly updates the assumed state.
4983   SmallSetVector<Value *, 4> PotentialCopies;
4984   unsigned RemainingUsesToExplore =
4985       getDefaultMaxUsesToExploreForCaptureTracking();
4986   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4987                               RemainingUsesToExplore);
4988 
4989   // Check all potential copies of the associated value until we can assume
4990   // none will be captured or we have to assume at least one might be.
4991   unsigned Idx = 0;
4992   PotentialCopies.insert(V);
4993   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4994     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4995 
4996   AANoCapture::StateType &S = getState();
4997   auto Assumed = S.getAssumed();
4998   S.intersectAssumedBits(T.getAssumed());
4999   if (!isAssumedNoCaptureMaybeReturned())
5000     return indicatePessimisticFixpoint();
5001   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5002                                    : ChangeStatus::CHANGED;
5003 }
5004 
5005 /// NoCapture attribute for function arguments.
5006 struct AANoCaptureArgument final : AANoCaptureImpl {
5007   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5008       : AANoCaptureImpl(IRP, A) {}
5009 
5010   /// See AbstractAttribute::trackStatistics()
5011   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5012 };
5013 
5014 /// NoCapture attribute for call site arguments.
5015 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5016   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5017       : AANoCaptureImpl(IRP, A) {}
5018 
5019   /// See AbstractAttribute::initialize(...).
5020   void initialize(Attributor &A) override {
5021     if (Argument *Arg = getAssociatedArgument())
5022       if (Arg->hasByValAttr())
5023         indicateOptimisticFixpoint();
5024     AANoCaptureImpl::initialize(A);
5025   }
5026 
5027   /// See AbstractAttribute::updateImpl(...).
5028   ChangeStatus updateImpl(Attributor &A) override {
5029     // TODO: Once we have call site specific value information we can provide
5030     //       call site specific liveness information and then it makes
5031     //       sense to specialize attributes for call sites arguments instead of
5032     //       redirecting requests to the callee argument.
5033     Argument *Arg = getAssociatedArgument();
5034     if (!Arg)
5035       return indicatePessimisticFixpoint();
5036     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5037     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5038     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5039   }
5040 
5041   /// See AbstractAttribute::trackStatistics()
5042   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5043 };
5044 
5045 /// NoCapture attribute for floating values.
5046 struct AANoCaptureFloating final : AANoCaptureImpl {
5047   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5048       : AANoCaptureImpl(IRP, A) {}
5049 
5050   /// See AbstractAttribute::trackStatistics()
5051   void trackStatistics() const override {
5052     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5053   }
5054 };
5055 
5056 /// NoCapture attribute for function return value.
5057 struct AANoCaptureReturned final : AANoCaptureImpl {
5058   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5059       : AANoCaptureImpl(IRP, A) {
5060     llvm_unreachable("NoCapture is not applicable to function returns!");
5061   }
5062 
5063   /// See AbstractAttribute::initialize(...).
5064   void initialize(Attributor &A) override {
5065     llvm_unreachable("NoCapture is not applicable to function returns!");
5066   }
5067 
5068   /// See AbstractAttribute::updateImpl(...).
5069   ChangeStatus updateImpl(Attributor &A) override {
5070     llvm_unreachable("NoCapture is not applicable to function returns!");
5071   }
5072 
5073   /// See AbstractAttribute::trackStatistics()
5074   void trackStatistics() const override {}
5075 };
5076 
5077 /// NoCapture attribute deduction for a call site return value.
5078 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5079   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5080       : AANoCaptureImpl(IRP, A) {}
5081 
5082   /// See AbstractAttribute::initialize(...).
5083   void initialize(Attributor &A) override {
5084     const Function *F = getAnchorScope();
5085     // Check what state the associated function can actually capture.
5086     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5087   }
5088 
5089   /// See AbstractAttribute::trackStatistics()
5090   void trackStatistics() const override {
5091     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5092   }
5093 };
5094 
5095 /// ------------------ Value Simplify Attribute ----------------------------
5096 
5097 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5098   // FIXME: Add a typecast support.
5099   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5100       SimplifiedAssociatedValue, Other, Ty);
5101   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5102     return false;
5103 
5104   LLVM_DEBUG({
5105     if (SimplifiedAssociatedValue.hasValue())
5106       dbgs() << "[ValueSimplify] is assumed to be "
5107              << **SimplifiedAssociatedValue << "\n";
5108     else
5109       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5110   });
5111   return true;
5112 }
5113 
5114 struct AAValueSimplifyImpl : AAValueSimplify {
5115   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5116       : AAValueSimplify(IRP, A) {}
5117 
5118   /// See AbstractAttribute::initialize(...).
5119   void initialize(Attributor &A) override {
5120     if (getAssociatedValue().getType()->isVoidTy())
5121       indicatePessimisticFixpoint();
5122     if (A.hasSimplificationCallback(getIRPosition()))
5123       indicatePessimisticFixpoint();
5124   }
5125 
5126   /// See AbstractAttribute::getAsStr().
5127   const std::string getAsStr() const override {
5128     LLVM_DEBUG({
5129       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5130       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5131         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5132     });
5133     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5134                           : "not-simple";
5135   }
5136 
5137   /// See AbstractAttribute::trackStatistics()
5138   void trackStatistics() const override {}
5139 
5140   /// See AAValueSimplify::getAssumedSimplifiedValue()
5141   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5142     return SimplifiedAssociatedValue;
5143   }
5144 
5145   /// Return a value we can use as replacement for the associated one, or
5146   /// nullptr if we don't have one that makes sense.
5147   Value *getReplacementValue(Attributor &A) const {
5148     Value *NewV;
5149     NewV = SimplifiedAssociatedValue.hasValue()
5150                ? SimplifiedAssociatedValue.getValue()
5151                : UndefValue::get(getAssociatedType());
5152     if (!NewV)
5153       return nullptr;
5154     NewV = AA::getWithType(*NewV, *getAssociatedType());
5155     if (!NewV || NewV == &getAssociatedValue())
5156       return nullptr;
5157     const Instruction *CtxI = getCtxI();
5158     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5159       return nullptr;
5160     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5161       return nullptr;
5162     return NewV;
5163   }
5164 
5165   /// Helper function for querying AAValueSimplify and updating candicate.
5166   /// \param IRP The value position we are trying to unify with SimplifiedValue
5167   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5168                       const IRPosition &IRP, bool Simplify = true) {
5169     bool UsedAssumedInformation = false;
5170     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5171     if (Simplify)
5172       QueryingValueSimplified =
5173           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5174     return unionAssumed(QueryingValueSimplified);
5175   }
5176 
5177   /// Returns a candidate is found or not
5178   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5179     if (!getAssociatedValue().getType()->isIntegerTy())
5180       return false;
5181 
5182     // This will also pass the call base context.
5183     const auto &AA =
5184         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5185 
5186     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5187 
5188     if (!COpt.hasValue()) {
5189       SimplifiedAssociatedValue = llvm::None;
5190       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5191       return true;
5192     }
5193     if (auto *C = COpt.getValue()) {
5194       SimplifiedAssociatedValue = C;
5195       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5196       return true;
5197     }
5198     return false;
5199   }
5200 
5201   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5202     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5203       return true;
5204     if (askSimplifiedValueFor<AAPotentialValues>(A))
5205       return true;
5206     return false;
5207   }
5208 
5209   /// See AbstractAttribute::manifest(...).
5210   ChangeStatus manifest(Attributor &A) override {
5211     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5212     if (getAssociatedValue().user_empty())
5213       return Changed;
5214 
5215     if (auto *NewV = getReplacementValue(A)) {
5216       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5217                         << *NewV << " :: " << *this << "\n");
5218       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5219         Changed = ChangeStatus::CHANGED;
5220     }
5221 
5222     return Changed | AAValueSimplify::manifest(A);
5223   }
5224 
5225   /// See AbstractState::indicatePessimisticFixpoint(...).
5226   ChangeStatus indicatePessimisticFixpoint() override {
5227     SimplifiedAssociatedValue = &getAssociatedValue();
5228     return AAValueSimplify::indicatePessimisticFixpoint();
5229   }
5230 
5231   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5232                          LoadInst &L, function_ref<bool(Value &)> Union) {
5233     auto UnionWrapper = [&](Value &V, Value &Obj) {
5234       if (isa<AllocaInst>(Obj))
5235         return Union(V);
5236       if (!AA::isDynamicallyUnique(A, AA, V))
5237         return false;
5238       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5239         return false;
5240       return Union(V);
5241     };
5242 
5243     Value &Ptr = *L.getPointerOperand();
5244     SmallVector<Value *, 8> Objects;
5245     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5246       return false;
5247 
5248     for (Value *Obj : Objects) {
5249       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5250       if (isa<UndefValue>(Obj))
5251         continue;
5252       if (isa<ConstantPointerNull>(Obj)) {
5253         // A null pointer access can be undefined but any offset from null may
5254         // be OK. We do not try to optimize the latter.
5255         bool UsedAssumedInformation = false;
5256         if (!NullPointerIsDefined(L.getFunction(),
5257                                   Ptr.getType()->getPointerAddressSpace()) &&
5258             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5259           continue;
5260         return false;
5261       }
5262       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5263         return false;
5264       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5265       if (!InitialVal || !Union(*InitialVal))
5266         return false;
5267 
5268       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5269                            "propagation, checking accesses next.\n");
5270 
5271       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5272         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5273         if (!Acc.isWrite())
5274           return true;
5275         if (Acc.isWrittenValueYetUndetermined())
5276           return true;
5277         Value *Content = Acc.getWrittenValue();
5278         if (!Content)
5279           return false;
5280         Value *CastedContent =
5281             AA::getWithType(*Content, *AA.getAssociatedType());
5282         if (!CastedContent)
5283           return false;
5284         if (IsExact)
5285           return UnionWrapper(*CastedContent, *Obj);
5286         if (auto *C = dyn_cast<Constant>(CastedContent))
5287           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5288             return UnionWrapper(*CastedContent, *Obj);
5289         return false;
5290       };
5291 
5292       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5293                                            DepClassTy::REQUIRED);
5294       if (!PI.forallInterferingAccesses(L, CheckAccess))
5295         return false;
5296     }
5297     return true;
5298   }
5299 };
5300 
5301 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5302   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5303       : AAValueSimplifyImpl(IRP, A) {}
5304 
5305   void initialize(Attributor &A) override {
5306     AAValueSimplifyImpl::initialize(A);
5307     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5308       indicatePessimisticFixpoint();
5309     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5310                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5311                 /* IgnoreSubsumingPositions */ true))
5312       indicatePessimisticFixpoint();
5313 
5314     // FIXME: This is a hack to prevent us from propagating function poiner in
5315     // the new pass manager CGSCC pass as it creates call edges the
5316     // CallGraphUpdater cannot handle yet.
5317     Value &V = getAssociatedValue();
5318     if (V.getType()->isPointerTy() &&
5319         V.getType()->getPointerElementType()->isFunctionTy() &&
5320         !A.isModulePass())
5321       indicatePessimisticFixpoint();
5322   }
5323 
5324   /// See AbstractAttribute::updateImpl(...).
5325   ChangeStatus updateImpl(Attributor &A) override {
5326     // Byval is only replacable if it is readonly otherwise we would write into
5327     // the replaced value and not the copy that byval creates implicitly.
5328     Argument *Arg = getAssociatedArgument();
5329     if (Arg->hasByValAttr()) {
5330       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5331       //       there is no race by not copying a constant byval.
5332       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5333                                                        DepClassTy::REQUIRED);
5334       if (!MemAA.isAssumedReadOnly())
5335         return indicatePessimisticFixpoint();
5336     }
5337 
5338     auto Before = SimplifiedAssociatedValue;
5339 
5340     auto PredForCallSite = [&](AbstractCallSite ACS) {
5341       const IRPosition &ACSArgPos =
5342           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5343       // Check if a coresponding argument was found or if it is on not
5344       // associated (which can happen for callback calls).
5345       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5346         return false;
5347 
5348       // Simplify the argument operand explicitly and check if the result is
5349       // valid in the current scope. This avoids refering to simplified values
5350       // in other functions, e.g., we don't want to say a an argument in a
5351       // static function is actually an argument in a different function.
5352       bool UsedAssumedInformation = false;
5353       Optional<Constant *> SimpleArgOp =
5354           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5355       if (!SimpleArgOp.hasValue())
5356         return true;
5357       if (!SimpleArgOp.getValue())
5358         return false;
5359       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5360         return false;
5361       return unionAssumed(*SimpleArgOp);
5362     };
5363 
5364     // Generate a answer specific to a call site context.
5365     bool Success;
5366     bool AllCallSitesKnown;
5367     if (hasCallBaseContext() &&
5368         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5369       Success = PredForCallSite(
5370           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5371     else
5372       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5373                                        AllCallSitesKnown);
5374 
5375     if (!Success)
5376       if (!askSimplifiedValueForOtherAAs(A))
5377         return indicatePessimisticFixpoint();
5378 
5379     // If a candicate was found in this update, return CHANGED.
5380     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5381                                                : ChangeStatus ::CHANGED;
5382   }
5383 
5384   /// See AbstractAttribute::trackStatistics()
5385   void trackStatistics() const override {
5386     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5387   }
5388 };
5389 
5390 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5391   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5392       : AAValueSimplifyImpl(IRP, A) {}
5393 
5394   /// See AAValueSimplify::getAssumedSimplifiedValue()
5395   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5396     if (!isValidState())
5397       return nullptr;
5398     return SimplifiedAssociatedValue;
5399   }
5400 
5401   /// See AbstractAttribute::updateImpl(...).
5402   ChangeStatus updateImpl(Attributor &A) override {
5403     auto Before = SimplifiedAssociatedValue;
5404 
5405     auto PredForReturned = [&](Value &V) {
5406       return checkAndUpdate(A, *this,
5407                             IRPosition::value(V, getCallBaseContext()));
5408     };
5409 
5410     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5411       if (!askSimplifiedValueForOtherAAs(A))
5412         return indicatePessimisticFixpoint();
5413 
5414     // If a candicate was found in this update, return CHANGED.
5415     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5416                                                : ChangeStatus ::CHANGED;
5417   }
5418 
5419   ChangeStatus manifest(Attributor &A) override {
5420     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5421 
5422     if (auto *NewV = getReplacementValue(A)) {
5423       auto PredForReturned =
5424           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5425             for (ReturnInst *RI : RetInsts) {
5426               Value *ReturnedVal = RI->getReturnValue();
5427               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5428                 return true;
5429               assert(RI->getFunction() == getAnchorScope() &&
5430                      "ReturnInst in wrong function!");
5431               LLVM_DEBUG(dbgs()
5432                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5433                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5434               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5435                 Changed = ChangeStatus::CHANGED;
5436             }
5437             return true;
5438           };
5439       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5440     }
5441 
5442     return Changed | AAValueSimplify::manifest(A);
5443   }
5444 
5445   /// See AbstractAttribute::trackStatistics()
5446   void trackStatistics() const override {
5447     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5448   }
5449 };
5450 
5451 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5452   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5453       : AAValueSimplifyImpl(IRP, A) {}
5454 
5455   /// See AbstractAttribute::initialize(...).
5456   void initialize(Attributor &A) override {
5457     AAValueSimplifyImpl::initialize(A);
5458     Value &V = getAnchorValue();
5459 
5460     // TODO: add other stuffs
5461     if (isa<Constant>(V))
5462       indicatePessimisticFixpoint();
5463   }
5464 
5465   /// Check if \p Cmp is a comparison we can simplify.
5466   ///
5467   /// We handle multiple cases, one in which at least one operand is an
5468   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5469   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5470   /// will be updated.
5471   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5472     auto Union = [&](Value &V) {
5473       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5474           SimplifiedAssociatedValue, &V, V.getType());
5475       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5476     };
5477 
5478     Value *LHS = Cmp.getOperand(0);
5479     Value *RHS = Cmp.getOperand(1);
5480 
5481     // Simplify the operands first.
5482     bool UsedAssumedInformation = false;
5483     const auto &SimplifiedLHS =
5484         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5485                                *this, UsedAssumedInformation);
5486     if (!SimplifiedLHS.hasValue())
5487       return true;
5488     if (!SimplifiedLHS.getValue())
5489       return false;
5490     LHS = *SimplifiedLHS;
5491 
5492     const auto &SimplifiedRHS =
5493         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5494                                *this, UsedAssumedInformation);
5495     if (!SimplifiedRHS.hasValue())
5496       return true;
5497     if (!SimplifiedRHS.getValue())
5498       return false;
5499     RHS = *SimplifiedRHS;
5500 
5501     LLVMContext &Ctx = Cmp.getContext();
5502     // Handle the trivial case first in which we don't even need to think about
5503     // null or non-null.
5504     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5505       Constant *NewVal =
5506           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5507       if (!Union(*NewVal))
5508         return false;
5509       if (!UsedAssumedInformation)
5510         indicateOptimisticFixpoint();
5511       return true;
5512     }
5513 
5514     // From now on we only handle equalities (==, !=).
5515     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5516     if (!ICmp || !ICmp->isEquality())
5517       return false;
5518 
5519     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5520     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5521     if (!LHSIsNull && !RHSIsNull)
5522       return false;
5523 
5524     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5525     // non-nullptr operand and if we assume it's non-null we can conclude the
5526     // result of the comparison.
5527     assert((LHSIsNull || RHSIsNull) &&
5528            "Expected nullptr versus non-nullptr comparison at this point");
5529 
5530     // The index is the operand that we assume is not null.
5531     unsigned PtrIdx = LHSIsNull;
5532     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5533         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5534         DepClassTy::REQUIRED);
5535     if (!PtrNonNullAA.isAssumedNonNull())
5536       return false;
5537     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5538 
5539     // The new value depends on the predicate, true for != and false for ==.
5540     Constant *NewVal = ConstantInt::get(
5541         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5542     if (!Union(*NewVal))
5543       return false;
5544 
5545     if (!UsedAssumedInformation)
5546       indicateOptimisticFixpoint();
5547 
5548     return true;
5549   }
5550 
5551   bool updateWithLoad(Attributor &A, LoadInst &L) {
5552     auto Union = [&](Value &V) {
5553       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5554           SimplifiedAssociatedValue, &V, L.getType());
5555       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5556     };
5557     return handleLoad(A, *this, L, Union);
5558   }
5559 
5560   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5561   /// simplify any operand of the instruction \p I. Return true if successful,
5562   /// in that case SimplifiedAssociatedValue will be updated.
5563   bool handleGenericInst(Attributor &A, Instruction &I) {
5564     bool SomeSimplified = false;
5565     bool UsedAssumedInformation = false;
5566 
5567     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5568     int Idx = 0;
5569     for (Value *Op : I.operands()) {
5570       const auto &SimplifiedOp =
5571           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5572                                  *this, UsedAssumedInformation);
5573       // If we are not sure about any operand we are not sure about the entire
5574       // instruction, we'll wait.
5575       if (!SimplifiedOp.hasValue())
5576         return true;
5577 
5578       if (SimplifiedOp.getValue())
5579         NewOps[Idx] = SimplifiedOp.getValue();
5580       else
5581         NewOps[Idx] = Op;
5582 
5583       SomeSimplified |= (NewOps[Idx] != Op);
5584       ++Idx;
5585     }
5586 
5587     // We won't bother with the InstSimplify interface if we didn't simplify any
5588     // operand ourselves.
5589     if (!SomeSimplified)
5590       return false;
5591 
5592     InformationCache &InfoCache = A.getInfoCache();
5593     Function *F = I.getFunction();
5594     const auto *DT =
5595         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5596     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5597     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5598     OptimizationRemarkEmitter *ORE = nullptr;
5599 
5600     const DataLayout &DL = I.getModule()->getDataLayout();
5601     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5602     if (Value *SimplifiedI =
5603             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5604       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5605           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5606       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5607     }
5608     return false;
5609   }
5610 
5611   /// See AbstractAttribute::updateImpl(...).
5612   ChangeStatus updateImpl(Attributor &A) override {
5613     auto Before = SimplifiedAssociatedValue;
5614 
5615     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5616                             bool Stripped) -> bool {
5617       auto &AA = A.getAAFor<AAValueSimplify>(
5618           *this, IRPosition::value(V, getCallBaseContext()),
5619           DepClassTy::REQUIRED);
5620       if (!Stripped && this == &AA) {
5621 
5622         if (auto *I = dyn_cast<Instruction>(&V)) {
5623           if (auto *LI = dyn_cast<LoadInst>(&V))
5624             if (updateWithLoad(A, *LI))
5625               return true;
5626           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5627             if (handleCmp(A, *Cmp))
5628               return true;
5629           if (handleGenericInst(A, *I))
5630             return true;
5631         }
5632         // TODO: Look the instruction and check recursively.
5633 
5634         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5635                           << "\n");
5636         return false;
5637       }
5638       return checkAndUpdate(A, *this,
5639                             IRPosition::value(V, getCallBaseContext()));
5640     };
5641 
5642     bool Dummy = false;
5643     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5644                                      VisitValueCB, getCtxI(),
5645                                      /* UseValueSimplify */ false))
5646       if (!askSimplifiedValueForOtherAAs(A))
5647         return indicatePessimisticFixpoint();
5648 
5649     // If a candicate was found in this update, return CHANGED.
5650     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5651                                                : ChangeStatus ::CHANGED;
5652   }
5653 
5654   /// See AbstractAttribute::trackStatistics()
5655   void trackStatistics() const override {
5656     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5657   }
5658 };
5659 
5660 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5661   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5662       : AAValueSimplifyImpl(IRP, A) {}
5663 
5664   /// See AbstractAttribute::initialize(...).
5665   void initialize(Attributor &A) override {
5666     SimplifiedAssociatedValue = nullptr;
5667     indicateOptimisticFixpoint();
5668   }
5669   /// See AbstractAttribute::initialize(...).
5670   ChangeStatus updateImpl(Attributor &A) override {
5671     llvm_unreachable(
5672         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5673   }
5674   /// See AbstractAttribute::trackStatistics()
5675   void trackStatistics() const override {
5676     STATS_DECLTRACK_FN_ATTR(value_simplify)
5677   }
5678 };
5679 
5680 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5681   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5682       : AAValueSimplifyFunction(IRP, A) {}
5683   /// See AbstractAttribute::trackStatistics()
5684   void trackStatistics() const override {
5685     STATS_DECLTRACK_CS_ATTR(value_simplify)
5686   }
5687 };
5688 
5689 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5690   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5691       : AAValueSimplifyImpl(IRP, A) {}
5692 
5693   void initialize(Attributor &A) override {
5694     AAValueSimplifyImpl::initialize(A);
5695     if (!getAssociatedFunction())
5696       indicatePessimisticFixpoint();
5697   }
5698 
5699   /// See AbstractAttribute::updateImpl(...).
5700   ChangeStatus updateImpl(Attributor &A) override {
5701     auto Before = SimplifiedAssociatedValue;
5702     auto &RetAA = A.getAAFor<AAReturnedValues>(
5703         *this, IRPosition::function(*getAssociatedFunction()),
5704         DepClassTy::REQUIRED);
5705     auto PredForReturned =
5706         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5707           bool UsedAssumedInformation = false;
5708           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5709               &RetVal, *cast<CallBase>(getCtxI()), *this,
5710               UsedAssumedInformation);
5711           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5712               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5713           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5714         };
5715     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5716       if (!askSimplifiedValueForOtherAAs(A))
5717         return indicatePessimisticFixpoint();
5718     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5719                                                : ChangeStatus ::CHANGED;
5720   }
5721 
5722   void trackStatistics() const override {
5723     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5724   }
5725 };
5726 
5727 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5728   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5729       : AAValueSimplifyFloating(IRP, A) {}
5730 
5731   /// See AbstractAttribute::manifest(...).
5732   ChangeStatus manifest(Attributor &A) override {
5733     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5734 
5735     if (auto *NewV = getReplacementValue(A)) {
5736       Use &U = cast<CallBase>(&getAnchorValue())
5737                    ->getArgOperandUse(getCallSiteArgNo());
5738       if (A.changeUseAfterManifest(U, *NewV))
5739         Changed = ChangeStatus::CHANGED;
5740     }
5741 
5742     return Changed | AAValueSimplify::manifest(A);
5743   }
5744 
5745   void trackStatistics() const override {
5746     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5747   }
5748 };
5749 
5750 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5751 struct AAHeapToStackFunction final : public AAHeapToStack {
5752 
5753   struct AllocationInfo {
5754     /// The call that allocates the memory.
5755     CallBase *const CB;
5756 
5757     /// The kind of allocation.
5758     const enum class AllocationKind {
5759       MALLOC,
5760       CALLOC,
5761       ALIGNED_ALLOC,
5762     } Kind;
5763 
5764     /// The library function id for the allocation.
5765     LibFunc LibraryFunctionId = NotLibFunc;
5766 
5767     /// The status wrt. a rewrite.
5768     enum {
5769       STACK_DUE_TO_USE,
5770       STACK_DUE_TO_FREE,
5771       INVALID,
5772     } Status = STACK_DUE_TO_USE;
5773 
5774     /// Flag to indicate if we encountered a use that might free this allocation
5775     /// but which is not in the deallocation infos.
5776     bool HasPotentiallyFreeingUnknownUses = false;
5777 
5778     /// The set of free calls that use this allocation.
5779     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5780   };
5781 
5782   struct DeallocationInfo {
5783     /// The call that deallocates the memory.
5784     CallBase *const CB;
5785 
5786     /// Flag to indicate if we don't know all objects this deallocation might
5787     /// free.
5788     bool MightFreeUnknownObjects = false;
5789 
5790     /// The set of allocation calls that are potentially freed.
5791     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5792   };
5793 
5794   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5795       : AAHeapToStack(IRP, A) {}
5796 
5797   ~AAHeapToStackFunction() {
5798     // Ensure we call the destructor so we release any memory allocated in the
5799     // sets.
5800     for (auto &It : AllocationInfos)
5801       It.getSecond()->~AllocationInfo();
5802     for (auto &It : DeallocationInfos)
5803       It.getSecond()->~DeallocationInfo();
5804   }
5805 
5806   void initialize(Attributor &A) override {
5807     AAHeapToStack::initialize(A);
5808 
5809     const Function *F = getAnchorScope();
5810     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5811 
5812     auto AllocationIdentifierCB = [&](Instruction &I) {
5813       CallBase *CB = dyn_cast<CallBase>(&I);
5814       if (!CB)
5815         return true;
5816       if (isFreeCall(CB, TLI)) {
5817         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5818         return true;
5819       }
5820       bool IsMalloc = isMallocLikeFn(CB, TLI);
5821       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5822       bool IsCalloc =
5823           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5824       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5825         return true;
5826       auto Kind =
5827           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5828                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5829                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5830 
5831       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5832       AllocationInfos[CB] = AI;
5833       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5834       return true;
5835     };
5836 
5837     bool UsedAssumedInformation = false;
5838     bool Success = A.checkForAllCallLikeInstructions(
5839         AllocationIdentifierCB, *this, UsedAssumedInformation,
5840         /* CheckBBLivenessOnly */ false,
5841         /* CheckPotentiallyDead */ true);
5842     (void)Success;
5843     assert(Success && "Did not expect the call base visit callback to fail!");
5844   }
5845 
5846   const std::string getAsStr() const override {
5847     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5848     for (const auto &It : AllocationInfos) {
5849       if (It.second->Status == AllocationInfo::INVALID)
5850         ++NumInvalidMallocs;
5851       else
5852         ++NumH2SMallocs;
5853     }
5854     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5855            std::to_string(NumInvalidMallocs);
5856   }
5857 
5858   /// See AbstractAttribute::trackStatistics().
5859   void trackStatistics() const override {
5860     STATS_DECL(
5861         MallocCalls, Function,
5862         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5863     for (auto &It : AllocationInfos)
5864       if (It.second->Status != AllocationInfo::INVALID)
5865         ++BUILD_STAT_NAME(MallocCalls, Function);
5866   }
5867 
5868   bool isAssumedHeapToStack(const CallBase &CB) const override {
5869     if (isValidState())
5870       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5871         return AI->Status != AllocationInfo::INVALID;
5872     return false;
5873   }
5874 
5875   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5876     if (!isValidState())
5877       return false;
5878 
5879     for (auto &It : AllocationInfos) {
5880       AllocationInfo &AI = *It.second;
5881       if (AI.Status == AllocationInfo::INVALID)
5882         continue;
5883 
5884       if (AI.PotentialFreeCalls.count(&CB))
5885         return true;
5886     }
5887 
5888     return false;
5889   }
5890 
5891   ChangeStatus manifest(Attributor &A) override {
5892     assert(getState().isValidState() &&
5893            "Attempted to manifest an invalid state!");
5894 
5895     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5896     Function *F = getAnchorScope();
5897     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5898 
5899     for (auto &It : AllocationInfos) {
5900       AllocationInfo &AI = *It.second;
5901       if (AI.Status == AllocationInfo::INVALID)
5902         continue;
5903 
5904       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5905         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5906         A.deleteAfterManifest(*FreeCall);
5907         HasChanged = ChangeStatus::CHANGED;
5908       }
5909 
5910       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5911                         << "\n");
5912 
5913       auto Remark = [&](OptimizationRemark OR) {
5914         LibFunc IsAllocShared;
5915         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5916           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5917             return OR << "Moving globalized variable to the stack.";
5918         return OR << "Moving memory allocation from the heap to the stack.";
5919       };
5920       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5921         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5922       else
5923         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5924 
5925       Value *Size;
5926       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5927       if (SizeAPI.hasValue()) {
5928         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5929       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5930         auto *Num = AI.CB->getOperand(0);
5931         auto *SizeT = AI.CB->getOperand(1);
5932         IRBuilder<> B(AI.CB);
5933         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5934       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5935         Size = AI.CB->getOperand(1);
5936       } else {
5937         Size = AI.CB->getOperand(0);
5938       }
5939 
5940       Align Alignment(1);
5941       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5942         Optional<APInt> AlignmentAPI =
5943             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5944         assert(AlignmentAPI.hasValue() &&
5945                "Expected an alignment during manifest!");
5946         Alignment =
5947             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5948       }
5949 
5950       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5951       Instruction *Alloca =
5952           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5953                          "", AI.CB->getNextNode());
5954 
5955       if (Alloca->getType() != AI.CB->getType())
5956         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5957                                  Alloca->getNextNode());
5958 
5959       A.changeValueAfterManifest(*AI.CB, *Alloca);
5960 
5961       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5962         auto *NBB = II->getNormalDest();
5963         BranchInst::Create(NBB, AI.CB->getParent());
5964         A.deleteAfterManifest(*AI.CB);
5965       } else {
5966         A.deleteAfterManifest(*AI.CB);
5967       }
5968 
5969       // Zero out the allocated memory if it was a calloc.
5970       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5971         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5972                                    Alloca->getNextNode());
5973         Value *Ops[] = {
5974             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5975             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5976 
5977         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5978         Module *M = F->getParent();
5979         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5980         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5981       }
5982       HasChanged = ChangeStatus::CHANGED;
5983     }
5984 
5985     return HasChanged;
5986   }
5987 
5988   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5989                            Value &V) {
5990     bool UsedAssumedInformation = false;
5991     Optional<Constant *> SimpleV =
5992         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5993     if (!SimpleV.hasValue())
5994       return APInt(64, 0);
5995     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5996       return CI->getValue();
5997     return llvm::None;
5998   }
5999 
6000   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6001                           AllocationInfo &AI) {
6002 
6003     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
6004       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
6005 
6006     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6007       // Only if the alignment is also constant we return a size.
6008       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
6009                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
6010                  : llvm::None;
6011 
6012     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
6013            "Expected only callocs are left");
6014     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
6015     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
6016     if (!Num.hasValue() || !Size.hasValue())
6017       return llvm::None;
6018     bool Overflow = false;
6019     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
6020     return Overflow ? llvm::None : Size;
6021   }
6022 
6023   /// Collection of all malloc-like calls in a function with associated
6024   /// information.
6025   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6026 
6027   /// Collection of all free-like calls in a function with associated
6028   /// information.
6029   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6030 
6031   ChangeStatus updateImpl(Attributor &A) override;
6032 };
6033 
6034 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6035   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6036   const Function *F = getAnchorScope();
6037 
6038   const auto &LivenessAA =
6039       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6040 
6041   MustBeExecutedContextExplorer &Explorer =
6042       A.getInfoCache().getMustBeExecutedContextExplorer();
6043 
6044   bool StackIsAccessibleByOtherThreads =
6045       A.getInfoCache().stackIsAccessibleByOtherThreads();
6046 
6047   // Flag to ensure we update our deallocation information at most once per
6048   // updateImpl call and only if we use the free check reasoning.
6049   bool HasUpdatedFrees = false;
6050 
6051   auto UpdateFrees = [&]() {
6052     HasUpdatedFrees = true;
6053 
6054     for (auto &It : DeallocationInfos) {
6055       DeallocationInfo &DI = *It.second;
6056       // For now we cannot use deallocations that have unknown inputs, skip
6057       // them.
6058       if (DI.MightFreeUnknownObjects)
6059         continue;
6060 
6061       // No need to analyze dead calls, ignore them instead.
6062       bool UsedAssumedInformation = false;
6063       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6064                           /* CheckBBLivenessOnly */ true))
6065         continue;
6066 
6067       // Use the optimistic version to get the freed objects, ignoring dead
6068       // branches etc.
6069       SmallVector<Value *, 8> Objects;
6070       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6071                                            *this, DI.CB)) {
6072         LLVM_DEBUG(
6073             dbgs()
6074             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6075         DI.MightFreeUnknownObjects = true;
6076         continue;
6077       }
6078 
6079       // Check each object explicitly.
6080       for (auto *Obj : Objects) {
6081         // Free of null and undef can be ignored as no-ops (or UB in the latter
6082         // case).
6083         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6084           continue;
6085 
6086         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6087         if (!ObjCB) {
6088           LLVM_DEBUG(dbgs()
6089                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6090           DI.MightFreeUnknownObjects = true;
6091           continue;
6092         }
6093 
6094         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6095         if (!AI) {
6096           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6097                             << "\n");
6098           DI.MightFreeUnknownObjects = true;
6099           continue;
6100         }
6101 
6102         DI.PotentialAllocationCalls.insert(ObjCB);
6103       }
6104     }
6105   };
6106 
6107   auto FreeCheck = [&](AllocationInfo &AI) {
6108     // If the stack is not accessible by other threads, the "must-free" logic
6109     // doesn't apply as the pointer could be shared and needs to be places in
6110     // "shareable" memory.
6111     if (!StackIsAccessibleByOtherThreads) {
6112       auto &NoSyncAA =
6113           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6114       if (!NoSyncAA.isAssumedNoSync()) {
6115         LLVM_DEBUG(
6116             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6117                       "other threads and function is not nosync:\n");
6118         return false;
6119       }
6120     }
6121     if (!HasUpdatedFrees)
6122       UpdateFrees();
6123 
6124     // TODO: Allow multi exit functions that have different free calls.
6125     if (AI.PotentialFreeCalls.size() != 1) {
6126       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6127                         << AI.PotentialFreeCalls.size() << "\n");
6128       return false;
6129     }
6130     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6131     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6132     if (!DI) {
6133       LLVM_DEBUG(
6134           dbgs() << "[H2S] unique free call was not known as deallocation call "
6135                  << *UniqueFree << "\n");
6136       return false;
6137     }
6138     if (DI->MightFreeUnknownObjects) {
6139       LLVM_DEBUG(
6140           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6141       return false;
6142     }
6143     if (DI->PotentialAllocationCalls.size() > 1) {
6144       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6145                         << DI->PotentialAllocationCalls.size()
6146                         << " different allocations\n");
6147       return false;
6148     }
6149     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6150       LLVM_DEBUG(
6151           dbgs()
6152           << "[H2S] unique free call not known to free this allocation but "
6153           << **DI->PotentialAllocationCalls.begin() << "\n");
6154       return false;
6155     }
6156     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6157     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6158       LLVM_DEBUG(
6159           dbgs()
6160           << "[H2S] unique free call might not be executed with the allocation "
6161           << *UniqueFree << "\n");
6162       return false;
6163     }
6164     return true;
6165   };
6166 
6167   auto UsesCheck = [&](AllocationInfo &AI) {
6168     bool ValidUsesOnly = true;
6169 
6170     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6171       Instruction *UserI = cast<Instruction>(U.getUser());
6172       if (isa<LoadInst>(UserI))
6173         return true;
6174       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6175         if (SI->getValueOperand() == U.get()) {
6176           LLVM_DEBUG(dbgs()
6177                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6178           ValidUsesOnly = false;
6179         } else {
6180           // A store into the malloc'ed memory is fine.
6181         }
6182         return true;
6183       }
6184       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6185         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6186           return true;
6187         if (DeallocationInfos.count(CB)) {
6188           AI.PotentialFreeCalls.insert(CB);
6189           return true;
6190         }
6191 
6192         unsigned ArgNo = CB->getArgOperandNo(&U);
6193 
6194         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6195             *this, IRPosition::callsite_argument(*CB, ArgNo),
6196             DepClassTy::OPTIONAL);
6197 
6198         // If a call site argument use is nofree, we are fine.
6199         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6200             *this, IRPosition::callsite_argument(*CB, ArgNo),
6201             DepClassTy::OPTIONAL);
6202 
6203         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6204         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6205         if (MaybeCaptured ||
6206             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6207              MaybeFreed)) {
6208           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6209 
6210           // Emit a missed remark if this is missed OpenMP globalization.
6211           auto Remark = [&](OptimizationRemarkMissed ORM) {
6212             return ORM
6213                    << "Could not move globalized variable to the stack. "
6214                       "Variable is potentially captured in call. Mark "
6215                       "parameter as `__attribute__((noescape))` to override.";
6216           };
6217 
6218           if (ValidUsesOnly &&
6219               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6220             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6221 
6222           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6223           ValidUsesOnly = false;
6224         }
6225         return true;
6226       }
6227 
6228       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6229           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6230         Follow = true;
6231         return true;
6232       }
6233       // Unknown user for which we can not track uses further (in a way that
6234       // makes sense).
6235       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6236       ValidUsesOnly = false;
6237       return true;
6238     };
6239     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6240       return false;
6241     return ValidUsesOnly;
6242   };
6243 
6244   // The actual update starts here. We look at all allocations and depending on
6245   // their status perform the appropriate check(s).
6246   for (auto &It : AllocationInfos) {
6247     AllocationInfo &AI = *It.second;
6248     if (AI.Status == AllocationInfo::INVALID)
6249       continue;
6250 
6251     if (MaxHeapToStackSize == -1) {
6252       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6253         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6254           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6255                             << "\n");
6256           AI.Status = AllocationInfo::INVALID;
6257           Changed = ChangeStatus::CHANGED;
6258           continue;
6259         }
6260     } else {
6261       Optional<APInt> Size = getSize(A, *this, AI);
6262       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6263         LLVM_DEBUG({
6264           if (!Size.hasValue())
6265             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6266                    << "\n";
6267           else
6268             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6269                    << MaxHeapToStackSize << "\n";
6270         });
6271 
6272         AI.Status = AllocationInfo::INVALID;
6273         Changed = ChangeStatus::CHANGED;
6274         continue;
6275       }
6276     }
6277 
6278     switch (AI.Status) {
6279     case AllocationInfo::STACK_DUE_TO_USE:
6280       if (UsesCheck(AI))
6281         continue;
6282       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6283       LLVM_FALLTHROUGH;
6284     case AllocationInfo::STACK_DUE_TO_FREE:
6285       if (FreeCheck(AI))
6286         continue;
6287       AI.Status = AllocationInfo::INVALID;
6288       Changed = ChangeStatus::CHANGED;
6289       continue;
6290     case AllocationInfo::INVALID:
6291       llvm_unreachable("Invalid allocations should never reach this point!");
6292     };
6293   }
6294 
6295   return Changed;
6296 }
6297 
6298 /// ----------------------- Privatizable Pointers ------------------------------
6299 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6300   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6301       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6302 
6303   ChangeStatus indicatePessimisticFixpoint() override {
6304     AAPrivatizablePtr::indicatePessimisticFixpoint();
6305     PrivatizableType = nullptr;
6306     return ChangeStatus::CHANGED;
6307   }
6308 
6309   /// Identify the type we can chose for a private copy of the underlying
6310   /// argument. None means it is not clear yet, nullptr means there is none.
6311   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6312 
6313   /// Return a privatizable type that encloses both T0 and T1.
6314   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6315   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6316     if (!T0.hasValue())
6317       return T1;
6318     if (!T1.hasValue())
6319       return T0;
6320     if (T0 == T1)
6321       return T0;
6322     return nullptr;
6323   }
6324 
6325   Optional<Type *> getPrivatizableType() const override {
6326     return PrivatizableType;
6327   }
6328 
6329   const std::string getAsStr() const override {
6330     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6331   }
6332 
6333 protected:
6334   Optional<Type *> PrivatizableType;
6335 };
6336 
6337 // TODO: Do this for call site arguments (probably also other values) as well.
6338 
6339 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6340   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6341       : AAPrivatizablePtrImpl(IRP, A) {}
6342 
6343   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6344   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6345     // If this is a byval argument and we know all the call sites (so we can
6346     // rewrite them), there is no need to check them explicitly.
6347     bool AllCallSitesKnown;
6348     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6349         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6350                                true, AllCallSitesKnown))
6351       return getAssociatedValue().getType()->getPointerElementType();
6352 
6353     Optional<Type *> Ty;
6354     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6355 
6356     // Make sure the associated call site argument has the same type at all call
6357     // sites and it is an allocation we know is safe to privatize, for now that
6358     // means we only allow alloca instructions.
6359     // TODO: We can additionally analyze the accesses in the callee to  create
6360     //       the type from that information instead. That is a little more
6361     //       involved and will be done in a follow up patch.
6362     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6363       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6364       // Check if a coresponding argument was found or if it is one not
6365       // associated (which can happen for callback calls).
6366       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6367         return false;
6368 
6369       // Check that all call sites agree on a type.
6370       auto &PrivCSArgAA =
6371           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6372       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6373 
6374       LLVM_DEBUG({
6375         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6376         if (CSTy.hasValue() && CSTy.getValue())
6377           CSTy.getValue()->print(dbgs());
6378         else if (CSTy.hasValue())
6379           dbgs() << "<nullptr>";
6380         else
6381           dbgs() << "<none>";
6382       });
6383 
6384       Ty = combineTypes(Ty, CSTy);
6385 
6386       LLVM_DEBUG({
6387         dbgs() << " : New Type: ";
6388         if (Ty.hasValue() && Ty.getValue())
6389           Ty.getValue()->print(dbgs());
6390         else if (Ty.hasValue())
6391           dbgs() << "<nullptr>";
6392         else
6393           dbgs() << "<none>";
6394         dbgs() << "\n";
6395       });
6396 
6397       return !Ty.hasValue() || Ty.getValue();
6398     };
6399 
6400     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6401       return nullptr;
6402     return Ty;
6403   }
6404 
6405   /// See AbstractAttribute::updateImpl(...).
6406   ChangeStatus updateImpl(Attributor &A) override {
6407     PrivatizableType = identifyPrivatizableType(A);
6408     if (!PrivatizableType.hasValue())
6409       return ChangeStatus::UNCHANGED;
6410     if (!PrivatizableType.getValue())
6411       return indicatePessimisticFixpoint();
6412 
6413     // The dependence is optional so we don't give up once we give up on the
6414     // alignment.
6415     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6416                         DepClassTy::OPTIONAL);
6417 
6418     // Avoid arguments with padding for now.
6419     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6420         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6421                                                 A.getInfoCache().getDL())) {
6422       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6423       return indicatePessimisticFixpoint();
6424     }
6425 
6426     // Verify callee and caller agree on how the promoted argument would be
6427     // passed.
6428     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6429     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6430     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6431     Function &Fn = *getIRPosition().getAnchorScope();
6432     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6433     ArgsToPromote.insert(getAssociatedArgument());
6434     const auto *TTI =
6435         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6436     if (!TTI ||
6437         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6438             Fn, *TTI, ArgsToPromote, Dummy) ||
6439         ArgsToPromote.empty()) {
6440       LLVM_DEBUG(
6441           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6442                  << Fn.getName() << "\n");
6443       return indicatePessimisticFixpoint();
6444     }
6445 
6446     // Collect the types that will replace the privatizable type in the function
6447     // signature.
6448     SmallVector<Type *, 16> ReplacementTypes;
6449     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6450 
6451     // Register a rewrite of the argument.
6452     Argument *Arg = getAssociatedArgument();
6453     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6454       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6455       return indicatePessimisticFixpoint();
6456     }
6457 
6458     unsigned ArgNo = Arg->getArgNo();
6459 
6460     // Helper to check if for the given call site the associated argument is
6461     // passed to a callback where the privatization would be different.
6462     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6463       SmallVector<const Use *, 4> CallbackUses;
6464       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6465       for (const Use *U : CallbackUses) {
6466         AbstractCallSite CBACS(U);
6467         assert(CBACS && CBACS.isCallbackCall());
6468         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6469           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6470 
6471           LLVM_DEBUG({
6472             dbgs()
6473                 << "[AAPrivatizablePtr] Argument " << *Arg
6474                 << "check if can be privatized in the context of its parent ("
6475                 << Arg->getParent()->getName()
6476                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6477                    "callback ("
6478                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6479                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6480                 << CBACS.getCallArgOperand(CBArg) << " vs "
6481                 << CB.getArgOperand(ArgNo) << "\n"
6482                 << "[AAPrivatizablePtr] " << CBArg << " : "
6483                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6484           });
6485 
6486           if (CBArgNo != int(ArgNo))
6487             continue;
6488           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6489               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6490           if (CBArgPrivAA.isValidState()) {
6491             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6492             if (!CBArgPrivTy.hasValue())
6493               continue;
6494             if (CBArgPrivTy.getValue() == PrivatizableType)
6495               continue;
6496           }
6497 
6498           LLVM_DEBUG({
6499             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6500                    << " cannot be privatized in the context of its parent ("
6501                    << Arg->getParent()->getName()
6502                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6503                       "callback ("
6504                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6505                    << ").\n[AAPrivatizablePtr] for which the argument "
6506                       "privatization is not compatible.\n";
6507           });
6508           return false;
6509         }
6510       }
6511       return true;
6512     };
6513 
6514     // Helper to check if for the given call site the associated argument is
6515     // passed to a direct call where the privatization would be different.
6516     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6517       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6518       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6519       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
6520              "Expected a direct call operand for callback call operand");
6521 
6522       LLVM_DEBUG({
6523         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6524                << " check if be privatized in the context of its parent ("
6525                << Arg->getParent()->getName()
6526                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6527                   "direct call of ("
6528                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6529                << ").\n";
6530       });
6531 
6532       Function *DCCallee = DC->getCalledFunction();
6533       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6534         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6535             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6536             DepClassTy::REQUIRED);
6537         if (DCArgPrivAA.isValidState()) {
6538           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6539           if (!DCArgPrivTy.hasValue())
6540             return true;
6541           if (DCArgPrivTy.getValue() == PrivatizableType)
6542             return true;
6543         }
6544       }
6545 
6546       LLVM_DEBUG({
6547         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6548                << " cannot be privatized in the context of its parent ("
6549                << Arg->getParent()->getName()
6550                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6551                   "direct call of ("
6552                << ACS.getInstruction()->getCalledFunction()->getName()
6553                << ").\n[AAPrivatizablePtr] for which the argument "
6554                   "privatization is not compatible.\n";
6555       });
6556       return false;
6557     };
6558 
6559     // Helper to check if the associated argument is used at the given abstract
6560     // call site in a way that is incompatible with the privatization assumed
6561     // here.
6562     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6563       if (ACS.isDirectCall())
6564         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6565       if (ACS.isCallbackCall())
6566         return IsCompatiblePrivArgOfDirectCS(ACS);
6567       return false;
6568     };
6569 
6570     bool AllCallSitesKnown;
6571     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6572                                 AllCallSitesKnown))
6573       return indicatePessimisticFixpoint();
6574 
6575     return ChangeStatus::UNCHANGED;
6576   }
6577 
6578   /// Given a type to private \p PrivType, collect the constituates (which are
6579   /// used) in \p ReplacementTypes.
6580   static void
6581   identifyReplacementTypes(Type *PrivType,
6582                            SmallVectorImpl<Type *> &ReplacementTypes) {
6583     // TODO: For now we expand the privatization type to the fullest which can
6584     //       lead to dead arguments that need to be removed later.
6585     assert(PrivType && "Expected privatizable type!");
6586 
6587     // Traverse the type, extract constituate types on the outermost level.
6588     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6589       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6590         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6591     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6592       ReplacementTypes.append(PrivArrayType->getNumElements(),
6593                               PrivArrayType->getElementType());
6594     } else {
6595       ReplacementTypes.push_back(PrivType);
6596     }
6597   }
6598 
6599   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6600   /// The values needed are taken from the arguments of \p F starting at
6601   /// position \p ArgNo.
6602   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6603                                    unsigned ArgNo, Instruction &IP) {
6604     assert(PrivType && "Expected privatizable type!");
6605 
6606     IRBuilder<NoFolder> IRB(&IP);
6607     const DataLayout &DL = F.getParent()->getDataLayout();
6608 
6609     // Traverse the type, build GEPs and stores.
6610     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6611       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6612       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6613         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6614         Value *Ptr =
6615             constructPointer(PointeeTy, PrivType, &Base,
6616                              PrivStructLayout->getElementOffset(u), IRB, DL);
6617         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6618       }
6619     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6620       Type *PointeeTy = PrivArrayType->getElementType();
6621       Type *PointeePtrTy = PointeeTy->getPointerTo();
6622       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6623       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6624         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6625                                       u * PointeeTySize, IRB, DL);
6626         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6627       }
6628     } else {
6629       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6630     }
6631   }
6632 
6633   /// Extract values from \p Base according to the type \p PrivType at the
6634   /// call position \p ACS. The values are appended to \p ReplacementValues.
6635   void createReplacementValues(Align Alignment, Type *PrivType,
6636                                AbstractCallSite ACS, Value *Base,
6637                                SmallVectorImpl<Value *> &ReplacementValues) {
6638     assert(Base && "Expected base value!");
6639     assert(PrivType && "Expected privatizable type!");
6640     Instruction *IP = ACS.getInstruction();
6641 
6642     IRBuilder<NoFolder> IRB(IP);
6643     const DataLayout &DL = IP->getModule()->getDataLayout();
6644 
6645     if (Base->getType()->getPointerElementType() != PrivType)
6646       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6647                                                  "", ACS.getInstruction());
6648 
6649     // Traverse the type, build GEPs and loads.
6650     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6651       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6652       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6653         Type *PointeeTy = PrivStructType->getElementType(u);
6654         Value *Ptr =
6655             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6656                              PrivStructLayout->getElementOffset(u), IRB, DL);
6657         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6658         L->setAlignment(Alignment);
6659         ReplacementValues.push_back(L);
6660       }
6661     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6662       Type *PointeeTy = PrivArrayType->getElementType();
6663       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6664       Type *PointeePtrTy = PointeeTy->getPointerTo();
6665       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6666         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6667                                       u * PointeeTySize, IRB, DL);
6668         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6669         L->setAlignment(Alignment);
6670         ReplacementValues.push_back(L);
6671       }
6672     } else {
6673       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6674       L->setAlignment(Alignment);
6675       ReplacementValues.push_back(L);
6676     }
6677   }
6678 
6679   /// See AbstractAttribute::manifest(...)
6680   ChangeStatus manifest(Attributor &A) override {
6681     if (!PrivatizableType.hasValue())
6682       return ChangeStatus::UNCHANGED;
6683     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6684 
6685     // Collect all tail calls in the function as we cannot allow new allocas to
6686     // escape into tail recursion.
6687     // TODO: Be smarter about new allocas escaping into tail calls.
6688     SmallVector<CallInst *, 16> TailCalls;
6689     bool UsedAssumedInformation = false;
6690     if (!A.checkForAllInstructions(
6691             [&](Instruction &I) {
6692               CallInst &CI = cast<CallInst>(I);
6693               if (CI.isTailCall())
6694                 TailCalls.push_back(&CI);
6695               return true;
6696             },
6697             *this, {Instruction::Call}, UsedAssumedInformation))
6698       return ChangeStatus::UNCHANGED;
6699 
6700     Argument *Arg = getAssociatedArgument();
6701     // Query AAAlign attribute for alignment of associated argument to
6702     // determine the best alignment of loads.
6703     const auto &AlignAA =
6704         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6705 
6706     // Callback to repair the associated function. A new alloca is placed at the
6707     // beginning and initialized with the values passed through arguments. The
6708     // new alloca replaces the use of the old pointer argument.
6709     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6710         [=](const Attributor::ArgumentReplacementInfo &ARI,
6711             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6712           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6713           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6714           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6715                                            Arg->getName() + ".priv", IP);
6716           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6717                                ArgIt->getArgNo(), *IP);
6718 
6719           if (AI->getType() != Arg->getType())
6720             AI =
6721                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6722           Arg->replaceAllUsesWith(AI);
6723 
6724           for (CallInst *CI : TailCalls)
6725             CI->setTailCall(false);
6726         };
6727 
6728     // Callback to repair a call site of the associated function. The elements
6729     // of the privatizable type are loaded prior to the call and passed to the
6730     // new function version.
6731     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6732         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6733                       AbstractCallSite ACS,
6734                       SmallVectorImpl<Value *> &NewArgOperands) {
6735           // When no alignment is specified for the load instruction,
6736           // natural alignment is assumed.
6737           createReplacementValues(
6738               assumeAligned(AlignAA.getAssumedAlign()),
6739               PrivatizableType.getValue(), ACS,
6740               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6741               NewArgOperands);
6742         };
6743 
6744     // Collect the types that will replace the privatizable type in the function
6745     // signature.
6746     SmallVector<Type *, 16> ReplacementTypes;
6747     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6748 
6749     // Register a rewrite of the argument.
6750     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6751                                            std::move(FnRepairCB),
6752                                            std::move(ACSRepairCB)))
6753       return ChangeStatus::CHANGED;
6754     return ChangeStatus::UNCHANGED;
6755   }
6756 
6757   /// See AbstractAttribute::trackStatistics()
6758   void trackStatistics() const override {
6759     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6760   }
6761 };
6762 
6763 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6764   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6765       : AAPrivatizablePtrImpl(IRP, A) {}
6766 
6767   /// See AbstractAttribute::initialize(...).
6768   virtual void initialize(Attributor &A) override {
6769     // TODO: We can privatize more than arguments.
6770     indicatePessimisticFixpoint();
6771   }
6772 
6773   ChangeStatus updateImpl(Attributor &A) override {
6774     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6775                      "updateImpl will not be called");
6776   }
6777 
6778   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6779   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6780     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6781     if (!Obj) {
6782       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6783       return nullptr;
6784     }
6785 
6786     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6787       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6788         if (CI->isOne())
6789           return Obj->getType()->getPointerElementType();
6790     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6791       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6792           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6793       if (PrivArgAA.isAssumedPrivatizablePtr())
6794         return Obj->getType()->getPointerElementType();
6795     }
6796 
6797     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6798                          "alloca nor privatizable argument: "
6799                       << *Obj << "!\n");
6800     return nullptr;
6801   }
6802 
6803   /// See AbstractAttribute::trackStatistics()
6804   void trackStatistics() const override {
6805     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6806   }
6807 };
6808 
6809 struct AAPrivatizablePtrCallSiteArgument final
6810     : public AAPrivatizablePtrFloating {
6811   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6812       : AAPrivatizablePtrFloating(IRP, A) {}
6813 
6814   /// See AbstractAttribute::initialize(...).
6815   void initialize(Attributor &A) override {
6816     if (getIRPosition().hasAttr(Attribute::ByVal))
6817       indicateOptimisticFixpoint();
6818   }
6819 
6820   /// See AbstractAttribute::updateImpl(...).
6821   ChangeStatus updateImpl(Attributor &A) override {
6822     PrivatizableType = identifyPrivatizableType(A);
6823     if (!PrivatizableType.hasValue())
6824       return ChangeStatus::UNCHANGED;
6825     if (!PrivatizableType.getValue())
6826       return indicatePessimisticFixpoint();
6827 
6828     const IRPosition &IRP = getIRPosition();
6829     auto &NoCaptureAA =
6830         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6831     if (!NoCaptureAA.isAssumedNoCapture()) {
6832       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6833       return indicatePessimisticFixpoint();
6834     }
6835 
6836     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6837     if (!NoAliasAA.isAssumedNoAlias()) {
6838       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6839       return indicatePessimisticFixpoint();
6840     }
6841 
6842     const auto &MemBehaviorAA =
6843         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6844     if (!MemBehaviorAA.isAssumedReadOnly()) {
6845       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6846       return indicatePessimisticFixpoint();
6847     }
6848 
6849     return ChangeStatus::UNCHANGED;
6850   }
6851 
6852   /// See AbstractAttribute::trackStatistics()
6853   void trackStatistics() const override {
6854     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6855   }
6856 };
6857 
6858 struct AAPrivatizablePtrCallSiteReturned final
6859     : public AAPrivatizablePtrFloating {
6860   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6861       : AAPrivatizablePtrFloating(IRP, A) {}
6862 
6863   /// See AbstractAttribute::initialize(...).
6864   void initialize(Attributor &A) override {
6865     // TODO: We can privatize more than arguments.
6866     indicatePessimisticFixpoint();
6867   }
6868 
6869   /// See AbstractAttribute::trackStatistics()
6870   void trackStatistics() const override {
6871     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6872   }
6873 };
6874 
6875 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6876   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6877       : AAPrivatizablePtrFloating(IRP, A) {}
6878 
6879   /// See AbstractAttribute::initialize(...).
6880   void initialize(Attributor &A) override {
6881     // TODO: We can privatize more than arguments.
6882     indicatePessimisticFixpoint();
6883   }
6884 
6885   /// See AbstractAttribute::trackStatistics()
6886   void trackStatistics() const override {
6887     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6888   }
6889 };
6890 
6891 /// -------------------- Memory Behavior Attributes ----------------------------
6892 /// Includes read-none, read-only, and write-only.
6893 /// ----------------------------------------------------------------------------
6894 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6895   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6896       : AAMemoryBehavior(IRP, A) {}
6897 
6898   /// See AbstractAttribute::initialize(...).
6899   void initialize(Attributor &A) override {
6900     intersectAssumedBits(BEST_STATE);
6901     getKnownStateFromValue(getIRPosition(), getState());
6902     AAMemoryBehavior::initialize(A);
6903   }
6904 
6905   /// Return the memory behavior information encoded in the IR for \p IRP.
6906   static void getKnownStateFromValue(const IRPosition &IRP,
6907                                      BitIntegerState &State,
6908                                      bool IgnoreSubsumingPositions = false) {
6909     SmallVector<Attribute, 2> Attrs;
6910     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6911     for (const Attribute &Attr : Attrs) {
6912       switch (Attr.getKindAsEnum()) {
6913       case Attribute::ReadNone:
6914         State.addKnownBits(NO_ACCESSES);
6915         break;
6916       case Attribute::ReadOnly:
6917         State.addKnownBits(NO_WRITES);
6918         break;
6919       case Attribute::WriteOnly:
6920         State.addKnownBits(NO_READS);
6921         break;
6922       default:
6923         llvm_unreachable("Unexpected attribute!");
6924       }
6925     }
6926 
6927     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6928       if (!I->mayReadFromMemory())
6929         State.addKnownBits(NO_READS);
6930       if (!I->mayWriteToMemory())
6931         State.addKnownBits(NO_WRITES);
6932     }
6933   }
6934 
6935   /// See AbstractAttribute::getDeducedAttributes(...).
6936   void getDeducedAttributes(LLVMContext &Ctx,
6937                             SmallVectorImpl<Attribute> &Attrs) const override {
6938     assert(Attrs.size() == 0);
6939     if (isAssumedReadNone())
6940       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6941     else if (isAssumedReadOnly())
6942       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6943     else if (isAssumedWriteOnly())
6944       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6945     assert(Attrs.size() <= 1);
6946   }
6947 
6948   /// See AbstractAttribute::manifest(...).
6949   ChangeStatus manifest(Attributor &A) override {
6950     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6951       return ChangeStatus::UNCHANGED;
6952 
6953     const IRPosition &IRP = getIRPosition();
6954 
6955     // Check if we would improve the existing attributes first.
6956     SmallVector<Attribute, 4> DeducedAttrs;
6957     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6958     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6959           return IRP.hasAttr(Attr.getKindAsEnum(),
6960                              /* IgnoreSubsumingPositions */ true);
6961         }))
6962       return ChangeStatus::UNCHANGED;
6963 
6964     // Clear existing attributes.
6965     IRP.removeAttrs(AttrKinds);
6966 
6967     // Use the generic manifest method.
6968     return IRAttribute::manifest(A);
6969   }
6970 
6971   /// See AbstractState::getAsStr().
6972   const std::string getAsStr() const override {
6973     if (isAssumedReadNone())
6974       return "readnone";
6975     if (isAssumedReadOnly())
6976       return "readonly";
6977     if (isAssumedWriteOnly())
6978       return "writeonly";
6979     return "may-read/write";
6980   }
6981 
6982   /// The set of IR attributes AAMemoryBehavior deals with.
6983   static const Attribute::AttrKind AttrKinds[3];
6984 };
6985 
6986 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6987     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6988 
6989 /// Memory behavior attribute for a floating value.
6990 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6991   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6992       : AAMemoryBehaviorImpl(IRP, A) {}
6993 
6994   /// See AbstractAttribute::updateImpl(...).
6995   ChangeStatus updateImpl(Attributor &A) override;
6996 
6997   /// See AbstractAttribute::trackStatistics()
6998   void trackStatistics() const override {
6999     if (isAssumedReadNone())
7000       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7001     else if (isAssumedReadOnly())
7002       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7003     else if (isAssumedWriteOnly())
7004       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7005   }
7006 
7007 private:
7008   /// Return true if users of \p UserI might access the underlying
7009   /// variable/location described by \p U and should therefore be analyzed.
7010   bool followUsersOfUseIn(Attributor &A, const Use &U,
7011                           const Instruction *UserI);
7012 
7013   /// Update the state according to the effect of use \p U in \p UserI.
7014   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7015 };
7016 
7017 /// Memory behavior attribute for function argument.
7018 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7019   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7020       : AAMemoryBehaviorFloating(IRP, A) {}
7021 
7022   /// See AbstractAttribute::initialize(...).
7023   void initialize(Attributor &A) override {
7024     intersectAssumedBits(BEST_STATE);
7025     const IRPosition &IRP = getIRPosition();
7026     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7027     // can query it when we use has/getAttr. That would allow us to reuse the
7028     // initialize of the base class here.
7029     bool HasByVal =
7030         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7031     getKnownStateFromValue(IRP, getState(),
7032                            /* IgnoreSubsumingPositions */ HasByVal);
7033 
7034     // Initialize the use vector with all direct uses of the associated value.
7035     Argument *Arg = getAssociatedArgument();
7036     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7037       indicatePessimisticFixpoint();
7038   }
7039 
7040   ChangeStatus manifest(Attributor &A) override {
7041     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7042     if (!getAssociatedValue().getType()->isPointerTy())
7043       return ChangeStatus::UNCHANGED;
7044 
7045     // TODO: From readattrs.ll: "inalloca parameters are always
7046     //                           considered written"
7047     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7048       removeKnownBits(NO_WRITES);
7049       removeAssumedBits(NO_WRITES);
7050     }
7051     return AAMemoryBehaviorFloating::manifest(A);
7052   }
7053 
7054   /// See AbstractAttribute::trackStatistics()
7055   void trackStatistics() const override {
7056     if (isAssumedReadNone())
7057       STATS_DECLTRACK_ARG_ATTR(readnone)
7058     else if (isAssumedReadOnly())
7059       STATS_DECLTRACK_ARG_ATTR(readonly)
7060     else if (isAssumedWriteOnly())
7061       STATS_DECLTRACK_ARG_ATTR(writeonly)
7062   }
7063 };
7064 
7065 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7066   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7067       : AAMemoryBehaviorArgument(IRP, A) {}
7068 
7069   /// See AbstractAttribute::initialize(...).
7070   void initialize(Attributor &A) override {
7071     // If we don't have an associated attribute this is either a variadic call
7072     // or an indirect call, either way, nothing to do here.
7073     Argument *Arg = getAssociatedArgument();
7074     if (!Arg) {
7075       indicatePessimisticFixpoint();
7076       return;
7077     }
7078     if (Arg->hasByValAttr()) {
7079       addKnownBits(NO_WRITES);
7080       removeKnownBits(NO_READS);
7081       removeAssumedBits(NO_READS);
7082     }
7083     AAMemoryBehaviorArgument::initialize(A);
7084     if (getAssociatedFunction()->isDeclaration())
7085       indicatePessimisticFixpoint();
7086   }
7087 
7088   /// See AbstractAttribute::updateImpl(...).
7089   ChangeStatus updateImpl(Attributor &A) override {
7090     // TODO: Once we have call site specific value information we can provide
7091     //       call site specific liveness liveness information and then it makes
7092     //       sense to specialize attributes for call sites arguments instead of
7093     //       redirecting requests to the callee argument.
7094     Argument *Arg = getAssociatedArgument();
7095     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7096     auto &ArgAA =
7097         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7098     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7099   }
7100 
7101   /// See AbstractAttribute::trackStatistics()
7102   void trackStatistics() const override {
7103     if (isAssumedReadNone())
7104       STATS_DECLTRACK_CSARG_ATTR(readnone)
7105     else if (isAssumedReadOnly())
7106       STATS_DECLTRACK_CSARG_ATTR(readonly)
7107     else if (isAssumedWriteOnly())
7108       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7109   }
7110 };
7111 
7112 /// Memory behavior attribute for a call site return position.
7113 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7114   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7115       : AAMemoryBehaviorFloating(IRP, A) {}
7116 
7117   /// See AbstractAttribute::initialize(...).
7118   void initialize(Attributor &A) override {
7119     AAMemoryBehaviorImpl::initialize(A);
7120     Function *F = getAssociatedFunction();
7121     if (!F || F->isDeclaration())
7122       indicatePessimisticFixpoint();
7123   }
7124 
7125   /// See AbstractAttribute::manifest(...).
7126   ChangeStatus manifest(Attributor &A) override {
7127     // We do not annotate returned values.
7128     return ChangeStatus::UNCHANGED;
7129   }
7130 
7131   /// See AbstractAttribute::trackStatistics()
7132   void trackStatistics() const override {}
7133 };
7134 
7135 /// An AA to represent the memory behavior function attributes.
7136 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7137   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7138       : AAMemoryBehaviorImpl(IRP, A) {}
7139 
7140   /// See AbstractAttribute::updateImpl(Attributor &A).
7141   virtual ChangeStatus updateImpl(Attributor &A) override;
7142 
7143   /// See AbstractAttribute::manifest(...).
7144   ChangeStatus manifest(Attributor &A) override {
7145     Function &F = cast<Function>(getAnchorValue());
7146     if (isAssumedReadNone()) {
7147       F.removeFnAttr(Attribute::ArgMemOnly);
7148       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7149       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7150     }
7151     return AAMemoryBehaviorImpl::manifest(A);
7152   }
7153 
7154   /// See AbstractAttribute::trackStatistics()
7155   void trackStatistics() const override {
7156     if (isAssumedReadNone())
7157       STATS_DECLTRACK_FN_ATTR(readnone)
7158     else if (isAssumedReadOnly())
7159       STATS_DECLTRACK_FN_ATTR(readonly)
7160     else if (isAssumedWriteOnly())
7161       STATS_DECLTRACK_FN_ATTR(writeonly)
7162   }
7163 };
7164 
7165 /// AAMemoryBehavior attribute for call sites.
7166 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7167   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7168       : AAMemoryBehaviorImpl(IRP, A) {}
7169 
7170   /// See AbstractAttribute::initialize(...).
7171   void initialize(Attributor &A) override {
7172     AAMemoryBehaviorImpl::initialize(A);
7173     Function *F = getAssociatedFunction();
7174     if (!F || F->isDeclaration())
7175       indicatePessimisticFixpoint();
7176   }
7177 
7178   /// See AbstractAttribute::updateImpl(...).
7179   ChangeStatus updateImpl(Attributor &A) override {
7180     // TODO: Once we have call site specific value information we can provide
7181     //       call site specific liveness liveness information and then it makes
7182     //       sense to specialize attributes for call sites arguments instead of
7183     //       redirecting requests to the callee argument.
7184     Function *F = getAssociatedFunction();
7185     const IRPosition &FnPos = IRPosition::function(*F);
7186     auto &FnAA =
7187         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7188     return clampStateAndIndicateChange(getState(), FnAA.getState());
7189   }
7190 
7191   /// See AbstractAttribute::trackStatistics()
7192   void trackStatistics() const override {
7193     if (isAssumedReadNone())
7194       STATS_DECLTRACK_CS_ATTR(readnone)
7195     else if (isAssumedReadOnly())
7196       STATS_DECLTRACK_CS_ATTR(readonly)
7197     else if (isAssumedWriteOnly())
7198       STATS_DECLTRACK_CS_ATTR(writeonly)
7199   }
7200 };
7201 
7202 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7203 
7204   // The current assumed state used to determine a change.
7205   auto AssumedState = getAssumed();
7206 
7207   auto CheckRWInst = [&](Instruction &I) {
7208     // If the instruction has an own memory behavior state, use it to restrict
7209     // the local state. No further analysis is required as the other memory
7210     // state is as optimistic as it gets.
7211     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7212       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7213           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7214       intersectAssumedBits(MemBehaviorAA.getAssumed());
7215       return !isAtFixpoint();
7216     }
7217 
7218     // Remove access kind modifiers if necessary.
7219     if (I.mayReadFromMemory())
7220       removeAssumedBits(NO_READS);
7221     if (I.mayWriteToMemory())
7222       removeAssumedBits(NO_WRITES);
7223     return !isAtFixpoint();
7224   };
7225 
7226   bool UsedAssumedInformation = false;
7227   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7228                                           UsedAssumedInformation))
7229     return indicatePessimisticFixpoint();
7230 
7231   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7232                                         : ChangeStatus::UNCHANGED;
7233 }
7234 
7235 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7236 
7237   const IRPosition &IRP = getIRPosition();
7238   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7239   AAMemoryBehavior::StateType &S = getState();
7240 
7241   // First, check the function scope. We take the known information and we avoid
7242   // work if the assumed information implies the current assumed information for
7243   // this attribute. This is a valid for all but byval arguments.
7244   Argument *Arg = IRP.getAssociatedArgument();
7245   AAMemoryBehavior::base_t FnMemAssumedState =
7246       AAMemoryBehavior::StateType::getWorstState();
7247   if (!Arg || !Arg->hasByValAttr()) {
7248     const auto &FnMemAA =
7249         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7250     FnMemAssumedState = FnMemAA.getAssumed();
7251     S.addKnownBits(FnMemAA.getKnown());
7252     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7253       return ChangeStatus::UNCHANGED;
7254   }
7255 
7256   // The current assumed state used to determine a change.
7257   auto AssumedState = S.getAssumed();
7258 
7259   // Make sure the value is not captured (except through "return"), if
7260   // it is, any information derived would be irrelevant anyway as we cannot
7261   // check the potential aliases introduced by the capture. However, no need
7262   // to fall back to anythign less optimistic than the function state.
7263   const auto &ArgNoCaptureAA =
7264       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7265   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7266     S.intersectAssumedBits(FnMemAssumedState);
7267     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7268                                           : ChangeStatus::UNCHANGED;
7269   }
7270 
7271   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7272   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7273     Instruction *UserI = cast<Instruction>(U.getUser());
7274     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7275                       << " \n");
7276 
7277     // Droppable users, e.g., llvm::assume does not actually perform any action.
7278     if (UserI->isDroppable())
7279       return true;
7280 
7281     // Check if the users of UserI should also be visited.
7282     Follow = followUsersOfUseIn(A, U, UserI);
7283 
7284     // If UserI might touch memory we analyze the use in detail.
7285     if (UserI->mayReadOrWriteMemory())
7286       analyzeUseIn(A, U, UserI);
7287 
7288     return !isAtFixpoint();
7289   };
7290 
7291   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7292     return indicatePessimisticFixpoint();
7293 
7294   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7295                                         : ChangeStatus::UNCHANGED;
7296 }
7297 
7298 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7299                                                   const Instruction *UserI) {
7300   // The loaded value is unrelated to the pointer argument, no need to
7301   // follow the users of the load.
7302   if (isa<LoadInst>(UserI))
7303     return false;
7304 
7305   // By default we follow all uses assuming UserI might leak information on U,
7306   // we have special handling for call sites operands though.
7307   const auto *CB = dyn_cast<CallBase>(UserI);
7308   if (!CB || !CB->isArgOperand(&U))
7309     return true;
7310 
7311   // If the use is a call argument known not to be captured, the users of
7312   // the call do not need to be visited because they have to be unrelated to
7313   // the input. Note that this check is not trivial even though we disallow
7314   // general capturing of the underlying argument. The reason is that the
7315   // call might the argument "through return", which we allow and for which we
7316   // need to check call users.
7317   if (U.get()->getType()->isPointerTy()) {
7318     unsigned ArgNo = CB->getArgOperandNo(&U);
7319     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7320         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7321     return !ArgNoCaptureAA.isAssumedNoCapture();
7322   }
7323 
7324   return true;
7325 }
7326 
7327 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7328                                             const Instruction *UserI) {
7329   assert(UserI->mayReadOrWriteMemory());
7330 
7331   switch (UserI->getOpcode()) {
7332   default:
7333     // TODO: Handle all atomics and other side-effect operations we know of.
7334     break;
7335   case Instruction::Load:
7336     // Loads cause the NO_READS property to disappear.
7337     removeAssumedBits(NO_READS);
7338     return;
7339 
7340   case Instruction::Store:
7341     // Stores cause the NO_WRITES property to disappear if the use is the
7342     // pointer operand. Note that we do assume that capturing was taken care of
7343     // somewhere else.
7344     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7345       removeAssumedBits(NO_WRITES);
7346     return;
7347 
7348   case Instruction::Call:
7349   case Instruction::CallBr:
7350   case Instruction::Invoke: {
7351     // For call sites we look at the argument memory behavior attribute (this
7352     // could be recursive!) in order to restrict our own state.
7353     const auto *CB = cast<CallBase>(UserI);
7354 
7355     // Give up on operand bundles.
7356     if (CB->isBundleOperand(&U)) {
7357       indicatePessimisticFixpoint();
7358       return;
7359     }
7360 
7361     // Calling a function does read the function pointer, maybe write it if the
7362     // function is self-modifying.
7363     if (CB->isCallee(&U)) {
7364       removeAssumedBits(NO_READS);
7365       break;
7366     }
7367 
7368     // Adjust the possible access behavior based on the information on the
7369     // argument.
7370     IRPosition Pos;
7371     if (U.get()->getType()->isPointerTy())
7372       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7373     else
7374       Pos = IRPosition::callsite_function(*CB);
7375     const auto &MemBehaviorAA =
7376         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7377     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7378     // and at least "known".
7379     intersectAssumedBits(MemBehaviorAA.getAssumed());
7380     return;
7381   }
7382   };
7383 
7384   // Generally, look at the "may-properties" and adjust the assumed state if we
7385   // did not trigger special handling before.
7386   if (UserI->mayReadFromMemory())
7387     removeAssumedBits(NO_READS);
7388   if (UserI->mayWriteToMemory())
7389     removeAssumedBits(NO_WRITES);
7390 }
7391 
7392 /// -------------------- Memory Locations Attributes ---------------------------
7393 /// Includes read-none, argmemonly, inaccessiblememonly,
7394 /// inaccessiblememorargmemonly
7395 /// ----------------------------------------------------------------------------
7396 
7397 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7398     AAMemoryLocation::MemoryLocationsKind MLK) {
7399   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7400     return "all memory";
7401   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7402     return "no memory";
7403   std::string S = "memory:";
7404   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7405     S += "stack,";
7406   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7407     S += "constant,";
7408   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7409     S += "internal global,";
7410   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7411     S += "external global,";
7412   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7413     S += "argument,";
7414   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7415     S += "inaccessible,";
7416   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7417     S += "malloced,";
7418   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7419     S += "unknown,";
7420   S.pop_back();
7421   return S;
7422 }
7423 
7424 namespace {
7425 struct AAMemoryLocationImpl : public AAMemoryLocation {
7426 
7427   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7428       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7429     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7430       AccessKind2Accesses[u] = nullptr;
7431   }
7432 
7433   ~AAMemoryLocationImpl() {
7434     // The AccessSets are allocated via a BumpPtrAllocator, we call
7435     // the destructor manually.
7436     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7437       if (AccessKind2Accesses[u])
7438         AccessKind2Accesses[u]->~AccessSet();
7439   }
7440 
7441   /// See AbstractAttribute::initialize(...).
7442   void initialize(Attributor &A) override {
7443     intersectAssumedBits(BEST_STATE);
7444     getKnownStateFromValue(A, getIRPosition(), getState());
7445     AAMemoryLocation::initialize(A);
7446   }
7447 
7448   /// Return the memory behavior information encoded in the IR for \p IRP.
7449   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7450                                      BitIntegerState &State,
7451                                      bool IgnoreSubsumingPositions = false) {
7452     // For internal functions we ignore `argmemonly` and
7453     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7454     // constant propagation. It is unclear if this is the best way but it is
7455     // unlikely this will cause real performance problems. If we are deriving
7456     // attributes for the anchor function we even remove the attribute in
7457     // addition to ignoring it.
7458     bool UseArgMemOnly = true;
7459     Function *AnchorFn = IRP.getAnchorScope();
7460     if (AnchorFn && A.isRunOn(*AnchorFn))
7461       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7462 
7463     SmallVector<Attribute, 2> Attrs;
7464     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7465     for (const Attribute &Attr : Attrs) {
7466       switch (Attr.getKindAsEnum()) {
7467       case Attribute::ReadNone:
7468         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7469         break;
7470       case Attribute::InaccessibleMemOnly:
7471         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7472         break;
7473       case Attribute::ArgMemOnly:
7474         if (UseArgMemOnly)
7475           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7476         else
7477           IRP.removeAttrs({Attribute::ArgMemOnly});
7478         break;
7479       case Attribute::InaccessibleMemOrArgMemOnly:
7480         if (UseArgMemOnly)
7481           State.addKnownBits(inverseLocation(
7482               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7483         else
7484           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7485         break;
7486       default:
7487         llvm_unreachable("Unexpected attribute!");
7488       }
7489     }
7490   }
7491 
7492   /// See AbstractAttribute::getDeducedAttributes(...).
7493   void getDeducedAttributes(LLVMContext &Ctx,
7494                             SmallVectorImpl<Attribute> &Attrs) const override {
7495     assert(Attrs.size() == 0);
7496     if (isAssumedReadNone()) {
7497       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7498     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7499       if (isAssumedInaccessibleMemOnly())
7500         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7501       else if (isAssumedArgMemOnly())
7502         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7503       else if (isAssumedInaccessibleOrArgMemOnly())
7504         Attrs.push_back(
7505             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7506     }
7507     assert(Attrs.size() <= 1);
7508   }
7509 
7510   /// See AbstractAttribute::manifest(...).
7511   ChangeStatus manifest(Attributor &A) override {
7512     const IRPosition &IRP = getIRPosition();
7513 
7514     // Check if we would improve the existing attributes first.
7515     SmallVector<Attribute, 4> DeducedAttrs;
7516     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7517     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7518           return IRP.hasAttr(Attr.getKindAsEnum(),
7519                              /* IgnoreSubsumingPositions */ true);
7520         }))
7521       return ChangeStatus::UNCHANGED;
7522 
7523     // Clear existing attributes.
7524     IRP.removeAttrs(AttrKinds);
7525     if (isAssumedReadNone())
7526       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7527 
7528     // Use the generic manifest method.
7529     return IRAttribute::manifest(A);
7530   }
7531 
7532   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7533   bool checkForAllAccessesToMemoryKind(
7534       function_ref<bool(const Instruction *, const Value *, AccessKind,
7535                         MemoryLocationsKind)>
7536           Pred,
7537       MemoryLocationsKind RequestedMLK) const override {
7538     if (!isValidState())
7539       return false;
7540 
7541     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7542     if (AssumedMLK == NO_LOCATIONS)
7543       return true;
7544 
7545     unsigned Idx = 0;
7546     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7547          CurMLK *= 2, ++Idx) {
7548       if (CurMLK & RequestedMLK)
7549         continue;
7550 
7551       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7552         for (const AccessInfo &AI : *Accesses)
7553           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7554             return false;
7555     }
7556 
7557     return true;
7558   }
7559 
7560   ChangeStatus indicatePessimisticFixpoint() override {
7561     // If we give up and indicate a pessimistic fixpoint this instruction will
7562     // become an access for all potential access kinds:
7563     // TODO: Add pointers for argmemonly and globals to improve the results of
7564     //       checkForAllAccessesToMemoryKind.
7565     bool Changed = false;
7566     MemoryLocationsKind KnownMLK = getKnown();
7567     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7568     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7569       if (!(CurMLK & KnownMLK))
7570         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7571                                   getAccessKindFromInst(I));
7572     return AAMemoryLocation::indicatePessimisticFixpoint();
7573   }
7574 
7575 protected:
7576   /// Helper struct to tie together an instruction that has a read or write
7577   /// effect with the pointer it accesses (if any).
7578   struct AccessInfo {
7579 
7580     /// The instruction that caused the access.
7581     const Instruction *I;
7582 
7583     /// The base pointer that is accessed, or null if unknown.
7584     const Value *Ptr;
7585 
7586     /// The kind of access (read/write/read+write).
7587     AccessKind Kind;
7588 
7589     bool operator==(const AccessInfo &RHS) const {
7590       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7591     }
7592     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7593       if (LHS.I != RHS.I)
7594         return LHS.I < RHS.I;
7595       if (LHS.Ptr != RHS.Ptr)
7596         return LHS.Ptr < RHS.Ptr;
7597       if (LHS.Kind != RHS.Kind)
7598         return LHS.Kind < RHS.Kind;
7599       return false;
7600     }
7601   };
7602 
7603   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7604   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7605   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7606   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7607 
7608   /// Categorize the pointer arguments of CB that might access memory in
7609   /// AccessedLoc and update the state and access map accordingly.
7610   void
7611   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7612                                      AAMemoryLocation::StateType &AccessedLocs,
7613                                      bool &Changed);
7614 
7615   /// Return the kind(s) of location that may be accessed by \p V.
7616   AAMemoryLocation::MemoryLocationsKind
7617   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7618 
7619   /// Return the access kind as determined by \p I.
7620   AccessKind getAccessKindFromInst(const Instruction *I) {
7621     AccessKind AK = READ_WRITE;
7622     if (I) {
7623       AK = I->mayReadFromMemory() ? READ : NONE;
7624       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7625     }
7626     return AK;
7627   }
7628 
7629   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7630   /// an access of kind \p AK to a \p MLK memory location with the access
7631   /// pointer \p Ptr.
7632   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7633                                  MemoryLocationsKind MLK, const Instruction *I,
7634                                  const Value *Ptr, bool &Changed,
7635                                  AccessKind AK = READ_WRITE) {
7636 
7637     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7638     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7639     if (!Accesses)
7640       Accesses = new (Allocator) AccessSet();
7641     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7642     State.removeAssumedBits(MLK);
7643   }
7644 
7645   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7646   /// arguments, and update the state and access map accordingly.
7647   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7648                           AAMemoryLocation::StateType &State, bool &Changed);
7649 
7650   /// Used to allocate access sets.
7651   BumpPtrAllocator &Allocator;
7652 
7653   /// The set of IR attributes AAMemoryLocation deals with.
7654   static const Attribute::AttrKind AttrKinds[4];
7655 };
7656 
7657 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7658     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7659     Attribute::InaccessibleMemOrArgMemOnly};
7660 
7661 void AAMemoryLocationImpl::categorizePtrValue(
7662     Attributor &A, const Instruction &I, const Value &Ptr,
7663     AAMemoryLocation::StateType &State, bool &Changed) {
7664   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7665                     << Ptr << " ["
7666                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7667 
7668   SmallVector<Value *, 8> Objects;
7669   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7670     LLVM_DEBUG(
7671         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7672     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7673                               getAccessKindFromInst(&I));
7674     return;
7675   }
7676 
7677   for (Value *Obj : Objects) {
7678     // TODO: recognize the TBAA used for constant accesses.
7679     MemoryLocationsKind MLK = NO_LOCATIONS;
7680     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7681     if (isa<UndefValue>(Obj))
7682       continue;
7683     if (auto *Arg = dyn_cast<Argument>(Obj)) {
7684       if (Arg->hasByValAttr())
7685         MLK = NO_LOCAL_MEM;
7686       else
7687         MLK = NO_ARGUMENT_MEM;
7688     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7689       // Reading constant memory is not treated as a read "effect" by the
7690       // function attr pass so we won't neither. Constants defined by TBAA are
7691       // similar. (We know we do not write it because it is constant.)
7692       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7693         if (GVar->isConstant())
7694           continue;
7695 
7696       if (GV->hasLocalLinkage())
7697         MLK = NO_GLOBAL_INTERNAL_MEM;
7698       else
7699         MLK = NO_GLOBAL_EXTERNAL_MEM;
7700     } else if (isa<ConstantPointerNull>(Obj) &&
7701                !NullPointerIsDefined(getAssociatedFunction(),
7702                                      Ptr.getType()->getPointerAddressSpace())) {
7703       continue;
7704     } else if (isa<AllocaInst>(Obj)) {
7705       MLK = NO_LOCAL_MEM;
7706     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7707       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7708           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7709       if (NoAliasAA.isAssumedNoAlias())
7710         MLK = NO_MALLOCED_MEM;
7711       else
7712         MLK = NO_UNKOWN_MEM;
7713     } else {
7714       MLK = NO_UNKOWN_MEM;
7715     }
7716 
7717     assert(MLK != NO_LOCATIONS && "No location specified!");
7718     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7719                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7720                       << "\n");
7721     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7722                               getAccessKindFromInst(&I));
7723   }
7724 
7725   LLVM_DEBUG(
7726       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7727              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7728 }
7729 
7730 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7731     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7732     bool &Changed) {
7733   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
7734 
7735     // Skip non-pointer arguments.
7736     const Value *ArgOp = CB.getArgOperand(ArgNo);
7737     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7738       continue;
7739 
7740     // Skip readnone arguments.
7741     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7742     const auto &ArgOpMemLocationAA =
7743         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7744 
7745     if (ArgOpMemLocationAA.isAssumedReadNone())
7746       continue;
7747 
7748     // Categorize potentially accessed pointer arguments as if there was an
7749     // access instruction with them as pointer.
7750     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7751   }
7752 }
7753 
7754 AAMemoryLocation::MemoryLocationsKind
7755 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7756                                                   bool &Changed) {
7757   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7758                     << I << "\n");
7759 
7760   AAMemoryLocation::StateType AccessedLocs;
7761   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7762 
7763   if (auto *CB = dyn_cast<CallBase>(&I)) {
7764 
7765     // First check if we assume any memory is access is visible.
7766     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7767         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7768     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7769                       << " [" << CBMemLocationAA << "]\n");
7770 
7771     if (CBMemLocationAA.isAssumedReadNone())
7772       return NO_LOCATIONS;
7773 
7774     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7775       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7776                                 Changed, getAccessKindFromInst(&I));
7777       return AccessedLocs.getAssumed();
7778     }
7779 
7780     uint32_t CBAssumedNotAccessedLocs =
7781         CBMemLocationAA.getAssumedNotAccessedLocation();
7782 
7783     // Set the argmemonly and global bit as we handle them separately below.
7784     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7785         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7786 
7787     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7788       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7789         continue;
7790       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7791                                 getAccessKindFromInst(&I));
7792     }
7793 
7794     // Now handle global memory if it might be accessed. This is slightly tricky
7795     // as NO_GLOBAL_MEM has multiple bits set.
7796     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7797     if (HasGlobalAccesses) {
7798       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7799                             AccessKind Kind, MemoryLocationsKind MLK) {
7800         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7801                                   getAccessKindFromInst(&I));
7802         return true;
7803       };
7804       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7805               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7806         return AccessedLocs.getWorstState();
7807     }
7808 
7809     LLVM_DEBUG(
7810         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7811                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7812 
7813     // Now handle argument memory if it might be accessed.
7814     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7815     if (HasArgAccesses)
7816       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7817 
7818     LLVM_DEBUG(
7819         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7820                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7821 
7822     return AccessedLocs.getAssumed();
7823   }
7824 
7825   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7826     LLVM_DEBUG(
7827         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7828                << I << " [" << *Ptr << "]\n");
7829     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7830     return AccessedLocs.getAssumed();
7831   }
7832 
7833   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7834                     << I << "\n");
7835   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7836                             getAccessKindFromInst(&I));
7837   return AccessedLocs.getAssumed();
7838 }
7839 
7840 /// An AA to represent the memory behavior function attributes.
7841 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7842   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7843       : AAMemoryLocationImpl(IRP, A) {}
7844 
7845   /// See AbstractAttribute::updateImpl(Attributor &A).
7846   virtual ChangeStatus updateImpl(Attributor &A) override {
7847 
7848     const auto &MemBehaviorAA =
7849         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7850     if (MemBehaviorAA.isAssumedReadNone()) {
7851       if (MemBehaviorAA.isKnownReadNone())
7852         return indicateOptimisticFixpoint();
7853       assert(isAssumedReadNone() &&
7854              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7855       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7856       return ChangeStatus::UNCHANGED;
7857     }
7858 
7859     // The current assumed state used to determine a change.
7860     auto AssumedState = getAssumed();
7861     bool Changed = false;
7862 
7863     auto CheckRWInst = [&](Instruction &I) {
7864       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7865       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7866                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7867       removeAssumedBits(inverseLocation(MLK, false, false));
7868       // Stop once only the valid bit set in the *not assumed location*, thus
7869       // once we don't actually exclude any memory locations in the state.
7870       return getAssumedNotAccessedLocation() != VALID_STATE;
7871     };
7872 
7873     bool UsedAssumedInformation = false;
7874     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7875                                             UsedAssumedInformation))
7876       return indicatePessimisticFixpoint();
7877 
7878     Changed |= AssumedState != getAssumed();
7879     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7880   }
7881 
7882   /// See AbstractAttribute::trackStatistics()
7883   void trackStatistics() const override {
7884     if (isAssumedReadNone())
7885       STATS_DECLTRACK_FN_ATTR(readnone)
7886     else if (isAssumedArgMemOnly())
7887       STATS_DECLTRACK_FN_ATTR(argmemonly)
7888     else if (isAssumedInaccessibleMemOnly())
7889       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7890     else if (isAssumedInaccessibleOrArgMemOnly())
7891       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7892   }
7893 };
7894 
7895 /// AAMemoryLocation attribute for call sites.
7896 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7897   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7898       : AAMemoryLocationImpl(IRP, A) {}
7899 
7900   /// See AbstractAttribute::initialize(...).
7901   void initialize(Attributor &A) override {
7902     AAMemoryLocationImpl::initialize(A);
7903     Function *F = getAssociatedFunction();
7904     if (!F || F->isDeclaration())
7905       indicatePessimisticFixpoint();
7906   }
7907 
7908   /// See AbstractAttribute::updateImpl(...).
7909   ChangeStatus updateImpl(Attributor &A) override {
7910     // TODO: Once we have call site specific value information we can provide
7911     //       call site specific liveness liveness information and then it makes
7912     //       sense to specialize attributes for call sites arguments instead of
7913     //       redirecting requests to the callee argument.
7914     Function *F = getAssociatedFunction();
7915     const IRPosition &FnPos = IRPosition::function(*F);
7916     auto &FnAA =
7917         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7918     bool Changed = false;
7919     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7920                           AccessKind Kind, MemoryLocationsKind MLK) {
7921       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7922                                 getAccessKindFromInst(I));
7923       return true;
7924     };
7925     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7926       return indicatePessimisticFixpoint();
7927     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7928   }
7929 
7930   /// See AbstractAttribute::trackStatistics()
7931   void trackStatistics() const override {
7932     if (isAssumedReadNone())
7933       STATS_DECLTRACK_CS_ATTR(readnone)
7934   }
7935 };
7936 
7937 /// ------------------ Value Constant Range Attribute -------------------------
7938 
7939 struct AAValueConstantRangeImpl : AAValueConstantRange {
7940   using StateType = IntegerRangeState;
7941   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7942       : AAValueConstantRange(IRP, A) {}
7943 
7944   /// See AbstractAttribute::initialize(..).
7945   void initialize(Attributor &A) override {
7946     if (A.hasSimplificationCallback(getIRPosition())) {
7947       indicatePessimisticFixpoint();
7948       return;
7949     }
7950 
7951     // Intersect a range given by SCEV.
7952     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7953 
7954     // Intersect a range given by LVI.
7955     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7956   }
7957 
7958   /// See AbstractAttribute::getAsStr().
7959   const std::string getAsStr() const override {
7960     std::string Str;
7961     llvm::raw_string_ostream OS(Str);
7962     OS << "range(" << getBitWidth() << ")<";
7963     getKnown().print(OS);
7964     OS << " / ";
7965     getAssumed().print(OS);
7966     OS << ">";
7967     return OS.str();
7968   }
7969 
7970   /// Helper function to get a SCEV expr for the associated value at program
7971   /// point \p I.
7972   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7973     if (!getAnchorScope())
7974       return nullptr;
7975 
7976     ScalarEvolution *SE =
7977         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7978             *getAnchorScope());
7979 
7980     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7981         *getAnchorScope());
7982 
7983     if (!SE || !LI)
7984       return nullptr;
7985 
7986     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7987     if (!I)
7988       return S;
7989 
7990     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7991   }
7992 
7993   /// Helper function to get a range from SCEV for the associated value at
7994   /// program point \p I.
7995   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7996                                          const Instruction *I = nullptr) const {
7997     if (!getAnchorScope())
7998       return getWorstState(getBitWidth());
7999 
8000     ScalarEvolution *SE =
8001         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8002             *getAnchorScope());
8003 
8004     const SCEV *S = getSCEV(A, I);
8005     if (!SE || !S)
8006       return getWorstState(getBitWidth());
8007 
8008     return SE->getUnsignedRange(S);
8009   }
8010 
8011   /// Helper function to get a range from LVI for the associated value at
8012   /// program point \p I.
8013   ConstantRange
8014   getConstantRangeFromLVI(Attributor &A,
8015                           const Instruction *CtxI = nullptr) const {
8016     if (!getAnchorScope())
8017       return getWorstState(getBitWidth());
8018 
8019     LazyValueInfo *LVI =
8020         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8021             *getAnchorScope());
8022 
8023     if (!LVI || !CtxI)
8024       return getWorstState(getBitWidth());
8025     return LVI->getConstantRange(&getAssociatedValue(),
8026                                  const_cast<Instruction *>(CtxI));
8027   }
8028 
8029   /// Return true if \p CtxI is valid for querying outside analyses.
8030   /// This basically makes sure we do not ask intra-procedural analysis
8031   /// about a context in the wrong function or a context that violates
8032   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8033   /// if the original context of this AA is OK or should be considered invalid.
8034   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8035                                                const Instruction *CtxI,
8036                                                bool AllowAACtxI) const {
8037     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8038       return false;
8039 
8040     // Our context might be in a different function, neither intra-procedural
8041     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8042     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8043       return false;
8044 
8045     // If the context is not dominated by the value there are paths to the
8046     // context that do not define the value. This cannot be handled by
8047     // LazyValueInfo so we need to bail.
8048     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8049       InformationCache &InfoCache = A.getInfoCache();
8050       const DominatorTree *DT =
8051           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8052               *I->getFunction());
8053       return DT && DT->dominates(I, CtxI);
8054     }
8055 
8056     return true;
8057   }
8058 
8059   /// See AAValueConstantRange::getKnownConstantRange(..).
8060   ConstantRange
8061   getKnownConstantRange(Attributor &A,
8062                         const Instruction *CtxI = nullptr) const override {
8063     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8064                                                  /* AllowAACtxI */ false))
8065       return getKnown();
8066 
8067     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8068     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8069     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8070   }
8071 
8072   /// See AAValueConstantRange::getAssumedConstantRange(..).
8073   ConstantRange
8074   getAssumedConstantRange(Attributor &A,
8075                           const Instruction *CtxI = nullptr) const override {
8076     // TODO: Make SCEV use Attributor assumption.
8077     //       We may be able to bound a variable range via assumptions in
8078     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8079     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8080     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8081                                                  /* AllowAACtxI */ false))
8082       return getAssumed();
8083 
8084     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8085     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8086     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8087   }
8088 
8089   /// Helper function to create MDNode for range metadata.
8090   static MDNode *
8091   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8092                             const ConstantRange &AssumedConstantRange) {
8093     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8094                                   Ty, AssumedConstantRange.getLower())),
8095                               ConstantAsMetadata::get(ConstantInt::get(
8096                                   Ty, AssumedConstantRange.getUpper()))};
8097     return MDNode::get(Ctx, LowAndHigh);
8098   }
8099 
8100   /// Return true if \p Assumed is included in \p KnownRanges.
8101   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8102 
8103     if (Assumed.isFullSet())
8104       return false;
8105 
8106     if (!KnownRanges)
8107       return true;
8108 
8109     // If multiple ranges are annotated in IR, we give up to annotate assumed
8110     // range for now.
8111 
8112     // TODO:  If there exists a known range which containts assumed range, we
8113     // can say assumed range is better.
8114     if (KnownRanges->getNumOperands() > 2)
8115       return false;
8116 
8117     ConstantInt *Lower =
8118         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8119     ConstantInt *Upper =
8120         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8121 
8122     ConstantRange Known(Lower->getValue(), Upper->getValue());
8123     return Known.contains(Assumed) && Known != Assumed;
8124   }
8125 
8126   /// Helper function to set range metadata.
8127   static bool
8128   setRangeMetadataIfisBetterRange(Instruction *I,
8129                                   const ConstantRange &AssumedConstantRange) {
8130     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8131     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8132       if (!AssumedConstantRange.isEmptySet()) {
8133         I->setMetadata(LLVMContext::MD_range,
8134                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8135                                                  AssumedConstantRange));
8136         return true;
8137       }
8138     }
8139     return false;
8140   }
8141 
8142   /// See AbstractAttribute::manifest()
8143   ChangeStatus manifest(Attributor &A) override {
8144     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8145     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8146     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8147 
8148     auto &V = getAssociatedValue();
8149     if (!AssumedConstantRange.isEmptySet() &&
8150         !AssumedConstantRange.isSingleElement()) {
8151       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8152         assert(I == getCtxI() && "Should not annotate an instruction which is "
8153                                  "not the context instruction");
8154         if (isa<CallInst>(I) || isa<LoadInst>(I))
8155           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8156             Changed = ChangeStatus::CHANGED;
8157       }
8158     }
8159 
8160     return Changed;
8161   }
8162 };
8163 
8164 struct AAValueConstantRangeArgument final
8165     : AAArgumentFromCallSiteArguments<
8166           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8167           true /* BridgeCallBaseContext */> {
8168   using Base = AAArgumentFromCallSiteArguments<
8169       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8170       true /* BridgeCallBaseContext */>;
8171   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8172       : Base(IRP, A) {}
8173 
8174   /// See AbstractAttribute::initialize(..).
8175   void initialize(Attributor &A) override {
8176     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8177       indicatePessimisticFixpoint();
8178     } else {
8179       Base::initialize(A);
8180     }
8181   }
8182 
8183   /// See AbstractAttribute::trackStatistics()
8184   void trackStatistics() const override {
8185     STATS_DECLTRACK_ARG_ATTR(value_range)
8186   }
8187 };
8188 
8189 struct AAValueConstantRangeReturned
8190     : AAReturnedFromReturnedValues<AAValueConstantRange,
8191                                    AAValueConstantRangeImpl,
8192                                    AAValueConstantRangeImpl::StateType,
8193                                    /* PropogateCallBaseContext */ true> {
8194   using Base =
8195       AAReturnedFromReturnedValues<AAValueConstantRange,
8196                                    AAValueConstantRangeImpl,
8197                                    AAValueConstantRangeImpl::StateType,
8198                                    /* PropogateCallBaseContext */ true>;
8199   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8200       : Base(IRP, A) {}
8201 
8202   /// See AbstractAttribute::initialize(...).
8203   void initialize(Attributor &A) override {}
8204 
8205   /// See AbstractAttribute::trackStatistics()
8206   void trackStatistics() const override {
8207     STATS_DECLTRACK_FNRET_ATTR(value_range)
8208   }
8209 };
8210 
8211 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8212   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8213       : AAValueConstantRangeImpl(IRP, A) {}
8214 
8215   /// See AbstractAttribute::initialize(...).
8216   void initialize(Attributor &A) override {
8217     AAValueConstantRangeImpl::initialize(A);
8218     if (isAtFixpoint())
8219       return;
8220 
8221     Value &V = getAssociatedValue();
8222 
8223     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8224       unionAssumed(ConstantRange(C->getValue()));
8225       indicateOptimisticFixpoint();
8226       return;
8227     }
8228 
8229     if (isa<UndefValue>(&V)) {
8230       // Collapse the undef state to 0.
8231       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8232       indicateOptimisticFixpoint();
8233       return;
8234     }
8235 
8236     if (isa<CallBase>(&V))
8237       return;
8238 
8239     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8240       return;
8241 
8242     // If it is a load instruction with range metadata, use it.
8243     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8244       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8245         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8246         return;
8247       }
8248 
8249     // We can work with PHI and select instruction as we traverse their operands
8250     // during update.
8251     if (isa<SelectInst>(V) || isa<PHINode>(V))
8252       return;
8253 
8254     // Otherwise we give up.
8255     indicatePessimisticFixpoint();
8256 
8257     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8258                       << getAssociatedValue() << "\n");
8259   }
8260 
8261   bool calculateBinaryOperator(
8262       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8263       const Instruction *CtxI,
8264       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8265     Value *LHS = BinOp->getOperand(0);
8266     Value *RHS = BinOp->getOperand(1);
8267 
8268     // Simplify the operands first.
8269     bool UsedAssumedInformation = false;
8270     const auto &SimplifiedLHS =
8271         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8272                                *this, UsedAssumedInformation);
8273     if (!SimplifiedLHS.hasValue())
8274       return true;
8275     if (!SimplifiedLHS.getValue())
8276       return false;
8277     LHS = *SimplifiedLHS;
8278 
8279     const auto &SimplifiedRHS =
8280         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8281                                *this, UsedAssumedInformation);
8282     if (!SimplifiedRHS.hasValue())
8283       return true;
8284     if (!SimplifiedRHS.getValue())
8285       return false;
8286     RHS = *SimplifiedRHS;
8287 
8288     // TODO: Allow non integers as well.
8289     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8290       return false;
8291 
8292     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8293         *this, IRPosition::value(*LHS, getCallBaseContext()),
8294         DepClassTy::REQUIRED);
8295     QuerriedAAs.push_back(&LHSAA);
8296     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8297 
8298     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8299         *this, IRPosition::value(*RHS, getCallBaseContext()),
8300         DepClassTy::REQUIRED);
8301     QuerriedAAs.push_back(&RHSAA);
8302     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8303 
8304     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8305 
8306     T.unionAssumed(AssumedRange);
8307 
8308     // TODO: Track a known state too.
8309 
8310     return T.isValidState();
8311   }
8312 
8313   bool calculateCastInst(
8314       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8315       const Instruction *CtxI,
8316       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8317     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8318     // TODO: Allow non integers as well.
8319     Value *OpV = CastI->getOperand(0);
8320 
8321     // Simplify the operand first.
8322     bool UsedAssumedInformation = false;
8323     const auto &SimplifiedOpV =
8324         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8325                                *this, UsedAssumedInformation);
8326     if (!SimplifiedOpV.hasValue())
8327       return true;
8328     if (!SimplifiedOpV.getValue())
8329       return false;
8330     OpV = *SimplifiedOpV;
8331 
8332     if (!OpV->getType()->isIntegerTy())
8333       return false;
8334 
8335     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8336         *this, IRPosition::value(*OpV, getCallBaseContext()),
8337         DepClassTy::REQUIRED);
8338     QuerriedAAs.push_back(&OpAA);
8339     T.unionAssumed(
8340         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8341     return T.isValidState();
8342   }
8343 
8344   bool
8345   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8346                    const Instruction *CtxI,
8347                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8348     Value *LHS = CmpI->getOperand(0);
8349     Value *RHS = CmpI->getOperand(1);
8350 
8351     // Simplify the operands first.
8352     bool UsedAssumedInformation = false;
8353     const auto &SimplifiedLHS =
8354         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8355                                *this, UsedAssumedInformation);
8356     if (!SimplifiedLHS.hasValue())
8357       return true;
8358     if (!SimplifiedLHS.getValue())
8359       return false;
8360     LHS = *SimplifiedLHS;
8361 
8362     const auto &SimplifiedRHS =
8363         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8364                                *this, UsedAssumedInformation);
8365     if (!SimplifiedRHS.hasValue())
8366       return true;
8367     if (!SimplifiedRHS.getValue())
8368       return false;
8369     RHS = *SimplifiedRHS;
8370 
8371     // TODO: Allow non integers as well.
8372     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8373       return false;
8374 
8375     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8376         *this, IRPosition::value(*LHS, getCallBaseContext()),
8377         DepClassTy::REQUIRED);
8378     QuerriedAAs.push_back(&LHSAA);
8379     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8380         *this, IRPosition::value(*RHS, getCallBaseContext()),
8381         DepClassTy::REQUIRED);
8382     QuerriedAAs.push_back(&RHSAA);
8383     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8384     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8385 
8386     // If one of them is empty set, we can't decide.
8387     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8388       return true;
8389 
8390     bool MustTrue = false, MustFalse = false;
8391 
8392     auto AllowedRegion =
8393         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8394 
8395     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8396       MustFalse = true;
8397 
8398     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8399       MustTrue = true;
8400 
8401     assert((!MustTrue || !MustFalse) &&
8402            "Either MustTrue or MustFalse should be false!");
8403 
8404     if (MustTrue)
8405       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8406     else if (MustFalse)
8407       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8408     else
8409       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8410 
8411     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8412                       << " " << RHSAA << "\n");
8413 
8414     // TODO: Track a known state too.
8415     return T.isValidState();
8416   }
8417 
8418   /// See AbstractAttribute::updateImpl(...).
8419   ChangeStatus updateImpl(Attributor &A) override {
8420     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8421                             IntegerRangeState &T, bool Stripped) -> bool {
8422       Instruction *I = dyn_cast<Instruction>(&V);
8423       if (!I || isa<CallBase>(I)) {
8424 
8425         // Simplify the operand first.
8426         bool UsedAssumedInformation = false;
8427         const auto &SimplifiedOpV =
8428             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8429                                    *this, UsedAssumedInformation);
8430         if (!SimplifiedOpV.hasValue())
8431           return true;
8432         if (!SimplifiedOpV.getValue())
8433           return false;
8434         Value *VPtr = *SimplifiedOpV;
8435 
8436         // If the value is not instruction, we query AA to Attributor.
8437         const auto &AA = A.getAAFor<AAValueConstantRange>(
8438             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8439             DepClassTy::REQUIRED);
8440 
8441         // Clamp operator is not used to utilize a program point CtxI.
8442         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8443 
8444         return T.isValidState();
8445       }
8446 
8447       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8448       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8449         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8450           return false;
8451       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8452         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8453           return false;
8454       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8455         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8456           return false;
8457       } else {
8458         // Give up with other instructions.
8459         // TODO: Add other instructions
8460 
8461         T.indicatePessimisticFixpoint();
8462         return false;
8463       }
8464 
8465       // Catch circular reasoning in a pessimistic way for now.
8466       // TODO: Check how the range evolves and if we stripped anything, see also
8467       //       AADereferenceable or AAAlign for similar situations.
8468       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8469         if (QueriedAA != this)
8470           continue;
8471         // If we are in a stady state we do not need to worry.
8472         if (T.getAssumed() == getState().getAssumed())
8473           continue;
8474         T.indicatePessimisticFixpoint();
8475       }
8476 
8477       return T.isValidState();
8478     };
8479 
8480     IntegerRangeState T(getBitWidth());
8481 
8482     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8483                                                   VisitValueCB, getCtxI(),
8484                                                   /* UseValueSimplify */ false))
8485       return indicatePessimisticFixpoint();
8486 
8487     return clampStateAndIndicateChange(getState(), T);
8488   }
8489 
8490   /// See AbstractAttribute::trackStatistics()
8491   void trackStatistics() const override {
8492     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8493   }
8494 };
8495 
8496 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8497   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8498       : AAValueConstantRangeImpl(IRP, A) {}
8499 
8500   /// See AbstractAttribute::initialize(...).
8501   ChangeStatus updateImpl(Attributor &A) override {
8502     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8503                      "not be called");
8504   }
8505 
8506   /// See AbstractAttribute::trackStatistics()
8507   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8508 };
8509 
8510 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8511   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8512       : AAValueConstantRangeFunction(IRP, A) {}
8513 
8514   /// See AbstractAttribute::trackStatistics()
8515   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8516 };
8517 
8518 struct AAValueConstantRangeCallSiteReturned
8519     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8520                                      AAValueConstantRangeImpl,
8521                                      AAValueConstantRangeImpl::StateType,
8522                                      /* IntroduceCallBaseContext */ true> {
8523   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8524       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8525                                        AAValueConstantRangeImpl,
8526                                        AAValueConstantRangeImpl::StateType,
8527                                        /* IntroduceCallBaseContext */ true>(IRP,
8528                                                                             A) {
8529   }
8530 
8531   /// See AbstractAttribute::initialize(...).
8532   void initialize(Attributor &A) override {
8533     // If it is a load instruction with range metadata, use the metadata.
8534     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8535       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8536         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8537 
8538     AAValueConstantRangeImpl::initialize(A);
8539   }
8540 
8541   /// See AbstractAttribute::trackStatistics()
8542   void trackStatistics() const override {
8543     STATS_DECLTRACK_CSRET_ATTR(value_range)
8544   }
8545 };
8546 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8547   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8548       : AAValueConstantRangeFloating(IRP, A) {}
8549 
8550   /// See AbstractAttribute::manifest()
8551   ChangeStatus manifest(Attributor &A) override {
8552     return ChangeStatus::UNCHANGED;
8553   }
8554 
8555   /// See AbstractAttribute::trackStatistics()
8556   void trackStatistics() const override {
8557     STATS_DECLTRACK_CSARG_ATTR(value_range)
8558   }
8559 };
8560 
8561 /// ------------------ Potential Values Attribute -------------------------
8562 
8563 struct AAPotentialValuesImpl : AAPotentialValues {
8564   using StateType = PotentialConstantIntValuesState;
8565 
8566   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8567       : AAPotentialValues(IRP, A) {}
8568 
8569   /// See AbstractAttribute::initialize(..).
8570   void initialize(Attributor &A) override {
8571     if (A.hasSimplificationCallback(getIRPosition()))
8572       indicatePessimisticFixpoint();
8573     else
8574       AAPotentialValues::initialize(A);
8575   }
8576 
8577   /// See AbstractAttribute::getAsStr().
8578   const std::string getAsStr() const override {
8579     std::string Str;
8580     llvm::raw_string_ostream OS(Str);
8581     OS << getState();
8582     return OS.str();
8583   }
8584 
8585   /// See AbstractAttribute::updateImpl(...).
8586   ChangeStatus updateImpl(Attributor &A) override {
8587     return indicatePessimisticFixpoint();
8588   }
8589 };
8590 
8591 struct AAPotentialValuesArgument final
8592     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8593                                       PotentialConstantIntValuesState> {
8594   using Base =
8595       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8596                                       PotentialConstantIntValuesState>;
8597   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8598       : Base(IRP, A) {}
8599 
8600   /// See AbstractAttribute::initialize(..).
8601   void initialize(Attributor &A) override {
8602     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8603       indicatePessimisticFixpoint();
8604     } else {
8605       Base::initialize(A);
8606     }
8607   }
8608 
8609   /// See AbstractAttribute::trackStatistics()
8610   void trackStatistics() const override {
8611     STATS_DECLTRACK_ARG_ATTR(potential_values)
8612   }
8613 };
8614 
8615 struct AAPotentialValuesReturned
8616     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8617   using Base =
8618       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8619   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8620       : Base(IRP, A) {}
8621 
8622   /// See AbstractAttribute::trackStatistics()
8623   void trackStatistics() const override {
8624     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8625   }
8626 };
8627 
8628 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8629   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8630       : AAPotentialValuesImpl(IRP, A) {}
8631 
8632   /// See AbstractAttribute::initialize(..).
8633   void initialize(Attributor &A) override {
8634     AAPotentialValuesImpl::initialize(A);
8635     if (isAtFixpoint())
8636       return;
8637 
8638     Value &V = getAssociatedValue();
8639 
8640     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8641       unionAssumed(C->getValue());
8642       indicateOptimisticFixpoint();
8643       return;
8644     }
8645 
8646     if (isa<UndefValue>(&V)) {
8647       unionAssumedWithUndef();
8648       indicateOptimisticFixpoint();
8649       return;
8650     }
8651 
8652     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8653       return;
8654 
8655     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8656       return;
8657 
8658     indicatePessimisticFixpoint();
8659 
8660     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8661                       << getAssociatedValue() << "\n");
8662   }
8663 
8664   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8665                                 const APInt &RHS) {
8666     ICmpInst::Predicate Pred = ICI->getPredicate();
8667     switch (Pred) {
8668     case ICmpInst::ICMP_UGT:
8669       return LHS.ugt(RHS);
8670     case ICmpInst::ICMP_SGT:
8671       return LHS.sgt(RHS);
8672     case ICmpInst::ICMP_EQ:
8673       return LHS.eq(RHS);
8674     case ICmpInst::ICMP_UGE:
8675       return LHS.uge(RHS);
8676     case ICmpInst::ICMP_SGE:
8677       return LHS.sge(RHS);
8678     case ICmpInst::ICMP_ULT:
8679       return LHS.ult(RHS);
8680     case ICmpInst::ICMP_SLT:
8681       return LHS.slt(RHS);
8682     case ICmpInst::ICMP_NE:
8683       return LHS.ne(RHS);
8684     case ICmpInst::ICMP_ULE:
8685       return LHS.ule(RHS);
8686     case ICmpInst::ICMP_SLE:
8687       return LHS.sle(RHS);
8688     default:
8689       llvm_unreachable("Invalid ICmp predicate!");
8690     }
8691   }
8692 
8693   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8694                                  uint32_t ResultBitWidth) {
8695     Instruction::CastOps CastOp = CI->getOpcode();
8696     switch (CastOp) {
8697     default:
8698       llvm_unreachable("unsupported or not integer cast");
8699     case Instruction::Trunc:
8700       return Src.trunc(ResultBitWidth);
8701     case Instruction::SExt:
8702       return Src.sext(ResultBitWidth);
8703     case Instruction::ZExt:
8704       return Src.zext(ResultBitWidth);
8705     case Instruction::BitCast:
8706       return Src;
8707     }
8708   }
8709 
8710   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8711                                        const APInt &LHS, const APInt &RHS,
8712                                        bool &SkipOperation, bool &Unsupported) {
8713     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8714     // Unsupported is set to true when the binary operator is not supported.
8715     // SkipOperation is set to true when UB occur with the given operand pair
8716     // (LHS, RHS).
8717     // TODO: we should look at nsw and nuw keywords to handle operations
8718     //       that create poison or undef value.
8719     switch (BinOpcode) {
8720     default:
8721       Unsupported = true;
8722       return LHS;
8723     case Instruction::Add:
8724       return LHS + RHS;
8725     case Instruction::Sub:
8726       return LHS - RHS;
8727     case Instruction::Mul:
8728       return LHS * RHS;
8729     case Instruction::UDiv:
8730       if (RHS.isNullValue()) {
8731         SkipOperation = true;
8732         return LHS;
8733       }
8734       return LHS.udiv(RHS);
8735     case Instruction::SDiv:
8736       if (RHS.isNullValue()) {
8737         SkipOperation = true;
8738         return LHS;
8739       }
8740       return LHS.sdiv(RHS);
8741     case Instruction::URem:
8742       if (RHS.isNullValue()) {
8743         SkipOperation = true;
8744         return LHS;
8745       }
8746       return LHS.urem(RHS);
8747     case Instruction::SRem:
8748       if (RHS.isNullValue()) {
8749         SkipOperation = true;
8750         return LHS;
8751       }
8752       return LHS.srem(RHS);
8753     case Instruction::Shl:
8754       return LHS.shl(RHS);
8755     case Instruction::LShr:
8756       return LHS.lshr(RHS);
8757     case Instruction::AShr:
8758       return LHS.ashr(RHS);
8759     case Instruction::And:
8760       return LHS & RHS;
8761     case Instruction::Or:
8762       return LHS | RHS;
8763     case Instruction::Xor:
8764       return LHS ^ RHS;
8765     }
8766   }
8767 
8768   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8769                                            const APInt &LHS, const APInt &RHS) {
8770     bool SkipOperation = false;
8771     bool Unsupported = false;
8772     APInt Result =
8773         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8774     if (Unsupported)
8775       return false;
8776     // If SkipOperation is true, we can ignore this operand pair (L, R).
8777     if (!SkipOperation)
8778       unionAssumed(Result);
8779     return isValidState();
8780   }
8781 
8782   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8783     auto AssumedBefore = getAssumed();
8784     Value *LHS = ICI->getOperand(0);
8785     Value *RHS = ICI->getOperand(1);
8786 
8787     // Simplify the operands first.
8788     bool UsedAssumedInformation = false;
8789     const auto &SimplifiedLHS =
8790         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8791                                *this, UsedAssumedInformation);
8792     if (!SimplifiedLHS.hasValue())
8793       return ChangeStatus::UNCHANGED;
8794     if (!SimplifiedLHS.getValue())
8795       return indicatePessimisticFixpoint();
8796     LHS = *SimplifiedLHS;
8797 
8798     const auto &SimplifiedRHS =
8799         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8800                                *this, UsedAssumedInformation);
8801     if (!SimplifiedRHS.hasValue())
8802       return ChangeStatus::UNCHANGED;
8803     if (!SimplifiedRHS.getValue())
8804       return indicatePessimisticFixpoint();
8805     RHS = *SimplifiedRHS;
8806 
8807     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8808       return indicatePessimisticFixpoint();
8809 
8810     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8811                                                 DepClassTy::REQUIRED);
8812     if (!LHSAA.isValidState())
8813       return indicatePessimisticFixpoint();
8814 
8815     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8816                                                 DepClassTy::REQUIRED);
8817     if (!RHSAA.isValidState())
8818       return indicatePessimisticFixpoint();
8819 
8820     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8821     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8822 
8823     // TODO: make use of undef flag to limit potential values aggressively.
8824     bool MaybeTrue = false, MaybeFalse = false;
8825     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8826     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8827       // The result of any comparison between undefs can be soundly replaced
8828       // with undef.
8829       unionAssumedWithUndef();
8830     } else if (LHSAA.undefIsContained()) {
8831       for (const APInt &R : RHSAAPVS) {
8832         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8833         MaybeTrue |= CmpResult;
8834         MaybeFalse |= !CmpResult;
8835         if (MaybeTrue & MaybeFalse)
8836           return indicatePessimisticFixpoint();
8837       }
8838     } else if (RHSAA.undefIsContained()) {
8839       for (const APInt &L : LHSAAPVS) {
8840         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8841         MaybeTrue |= CmpResult;
8842         MaybeFalse |= !CmpResult;
8843         if (MaybeTrue & MaybeFalse)
8844           return indicatePessimisticFixpoint();
8845       }
8846     } else {
8847       for (const APInt &L : LHSAAPVS) {
8848         for (const APInt &R : RHSAAPVS) {
8849           bool CmpResult = calculateICmpInst(ICI, L, R);
8850           MaybeTrue |= CmpResult;
8851           MaybeFalse |= !CmpResult;
8852           if (MaybeTrue & MaybeFalse)
8853             return indicatePessimisticFixpoint();
8854         }
8855       }
8856     }
8857     if (MaybeTrue)
8858       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8859     if (MaybeFalse)
8860       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8861     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8862                                          : ChangeStatus::CHANGED;
8863   }
8864 
8865   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8866     auto AssumedBefore = getAssumed();
8867     Value *LHS = SI->getTrueValue();
8868     Value *RHS = SI->getFalseValue();
8869 
8870     // Simplify the operands first.
8871     bool UsedAssumedInformation = false;
8872     const auto &SimplifiedLHS =
8873         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8874                                *this, UsedAssumedInformation);
8875     if (!SimplifiedLHS.hasValue())
8876       return ChangeStatus::UNCHANGED;
8877     if (!SimplifiedLHS.getValue())
8878       return indicatePessimisticFixpoint();
8879     LHS = *SimplifiedLHS;
8880 
8881     const auto &SimplifiedRHS =
8882         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8883                                *this, UsedAssumedInformation);
8884     if (!SimplifiedRHS.hasValue())
8885       return ChangeStatus::UNCHANGED;
8886     if (!SimplifiedRHS.getValue())
8887       return indicatePessimisticFixpoint();
8888     RHS = *SimplifiedRHS;
8889 
8890     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8891       return indicatePessimisticFixpoint();
8892 
8893     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8894                                                   UsedAssumedInformation);
8895 
8896     // Check if we only need one operand.
8897     bool OnlyLeft = false, OnlyRight = false;
8898     if (C.hasValue() && *C && (*C)->isOneValue())
8899       OnlyLeft = true;
8900     else if (C.hasValue() && *C && (*C)->isZeroValue())
8901       OnlyRight = true;
8902 
8903     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8904     if (!OnlyRight) {
8905       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8906                                              DepClassTy::REQUIRED);
8907       if (!LHSAA->isValidState())
8908         return indicatePessimisticFixpoint();
8909     }
8910     if (!OnlyLeft) {
8911       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8912                                              DepClassTy::REQUIRED);
8913       if (!RHSAA->isValidState())
8914         return indicatePessimisticFixpoint();
8915     }
8916 
8917     if (!LHSAA || !RHSAA) {
8918       // select (true/false), lhs, rhs
8919       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8920 
8921       if (OpAA->undefIsContained())
8922         unionAssumedWithUndef();
8923       else
8924         unionAssumed(*OpAA);
8925 
8926     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8927       // select i1 *, undef , undef => undef
8928       unionAssumedWithUndef();
8929     } else {
8930       unionAssumed(*LHSAA);
8931       unionAssumed(*RHSAA);
8932     }
8933     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8934                                          : ChangeStatus::CHANGED;
8935   }
8936 
8937   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8938     auto AssumedBefore = getAssumed();
8939     if (!CI->isIntegerCast())
8940       return indicatePessimisticFixpoint();
8941     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8942     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8943     Value *Src = CI->getOperand(0);
8944 
8945     // Simplify the operand first.
8946     bool UsedAssumedInformation = false;
8947     const auto &SimplifiedSrc =
8948         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8949                                *this, UsedAssumedInformation);
8950     if (!SimplifiedSrc.hasValue())
8951       return ChangeStatus::UNCHANGED;
8952     if (!SimplifiedSrc.getValue())
8953       return indicatePessimisticFixpoint();
8954     Src = *SimplifiedSrc;
8955 
8956     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8957                                                 DepClassTy::REQUIRED);
8958     if (!SrcAA.isValidState())
8959       return indicatePessimisticFixpoint();
8960     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8961     if (SrcAA.undefIsContained())
8962       unionAssumedWithUndef();
8963     else {
8964       for (const APInt &S : SrcAAPVS) {
8965         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8966         unionAssumed(T);
8967       }
8968     }
8969     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8970                                          : ChangeStatus::CHANGED;
8971   }
8972 
8973   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8974     auto AssumedBefore = getAssumed();
8975     Value *LHS = BinOp->getOperand(0);
8976     Value *RHS = BinOp->getOperand(1);
8977 
8978     // Simplify the operands first.
8979     bool UsedAssumedInformation = false;
8980     const auto &SimplifiedLHS =
8981         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8982                                *this, UsedAssumedInformation);
8983     if (!SimplifiedLHS.hasValue())
8984       return ChangeStatus::UNCHANGED;
8985     if (!SimplifiedLHS.getValue())
8986       return indicatePessimisticFixpoint();
8987     LHS = *SimplifiedLHS;
8988 
8989     const auto &SimplifiedRHS =
8990         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8991                                *this, UsedAssumedInformation);
8992     if (!SimplifiedRHS.hasValue())
8993       return ChangeStatus::UNCHANGED;
8994     if (!SimplifiedRHS.getValue())
8995       return indicatePessimisticFixpoint();
8996     RHS = *SimplifiedRHS;
8997 
8998     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8999       return indicatePessimisticFixpoint();
9000 
9001     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9002                                                 DepClassTy::REQUIRED);
9003     if (!LHSAA.isValidState())
9004       return indicatePessimisticFixpoint();
9005 
9006     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9007                                                 DepClassTy::REQUIRED);
9008     if (!RHSAA.isValidState())
9009       return indicatePessimisticFixpoint();
9010 
9011     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9012     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9013     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9014 
9015     // TODO: make use of undef flag to limit potential values aggressively.
9016     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9017       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9018         return indicatePessimisticFixpoint();
9019     } else if (LHSAA.undefIsContained()) {
9020       for (const APInt &R : RHSAAPVS) {
9021         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9022           return indicatePessimisticFixpoint();
9023       }
9024     } else if (RHSAA.undefIsContained()) {
9025       for (const APInt &L : LHSAAPVS) {
9026         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9027           return indicatePessimisticFixpoint();
9028       }
9029     } else {
9030       for (const APInt &L : LHSAAPVS) {
9031         for (const APInt &R : RHSAAPVS) {
9032           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9033             return indicatePessimisticFixpoint();
9034         }
9035       }
9036     }
9037     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9038                                          : ChangeStatus::CHANGED;
9039   }
9040 
9041   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9042     auto AssumedBefore = getAssumed();
9043     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9044       Value *IncomingValue = PHI->getIncomingValue(u);
9045 
9046       // Simplify the operand first.
9047       bool UsedAssumedInformation = false;
9048       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9049           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9050           UsedAssumedInformation);
9051       if (!SimplifiedIncomingValue.hasValue())
9052         continue;
9053       if (!SimplifiedIncomingValue.getValue())
9054         return indicatePessimisticFixpoint();
9055       IncomingValue = *SimplifiedIncomingValue;
9056 
9057       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9058           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9059       if (!PotentialValuesAA.isValidState())
9060         return indicatePessimisticFixpoint();
9061       if (PotentialValuesAA.undefIsContained())
9062         unionAssumedWithUndef();
9063       else
9064         unionAssumed(PotentialValuesAA.getAssumed());
9065     }
9066     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9067                                          : ChangeStatus::CHANGED;
9068   }
9069 
9070   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9071     if (!L.getType()->isIntegerTy())
9072       return indicatePessimisticFixpoint();
9073 
9074     auto Union = [&](Value &V) {
9075       if (isa<UndefValue>(V)) {
9076         unionAssumedWithUndef();
9077         return true;
9078       }
9079       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9080         unionAssumed(CI->getValue());
9081         return true;
9082       }
9083       return false;
9084     };
9085     auto AssumedBefore = getAssumed();
9086 
9087     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9088       return indicatePessimisticFixpoint();
9089 
9090     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9091                                          : ChangeStatus::CHANGED;
9092   }
9093 
9094   /// See AbstractAttribute::updateImpl(...).
9095   ChangeStatus updateImpl(Attributor &A) override {
9096     Value &V = getAssociatedValue();
9097     Instruction *I = dyn_cast<Instruction>(&V);
9098 
9099     if (auto *ICI = dyn_cast<ICmpInst>(I))
9100       return updateWithICmpInst(A, ICI);
9101 
9102     if (auto *SI = dyn_cast<SelectInst>(I))
9103       return updateWithSelectInst(A, SI);
9104 
9105     if (auto *CI = dyn_cast<CastInst>(I))
9106       return updateWithCastInst(A, CI);
9107 
9108     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9109       return updateWithBinaryOperator(A, BinOp);
9110 
9111     if (auto *PHI = dyn_cast<PHINode>(I))
9112       return updateWithPHINode(A, PHI);
9113 
9114     if (auto *L = dyn_cast<LoadInst>(I))
9115       return updateWithLoad(A, *L);
9116 
9117     return indicatePessimisticFixpoint();
9118   }
9119 
9120   /// See AbstractAttribute::trackStatistics()
9121   void trackStatistics() const override {
9122     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9123   }
9124 };
9125 
9126 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9127   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9128       : AAPotentialValuesImpl(IRP, A) {}
9129 
9130   /// See AbstractAttribute::initialize(...).
9131   ChangeStatus updateImpl(Attributor &A) override {
9132     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9133                      "not be called");
9134   }
9135 
9136   /// See AbstractAttribute::trackStatistics()
9137   void trackStatistics() const override {
9138     STATS_DECLTRACK_FN_ATTR(potential_values)
9139   }
9140 };
9141 
9142 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9143   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9144       : AAPotentialValuesFunction(IRP, A) {}
9145 
9146   /// See AbstractAttribute::trackStatistics()
9147   void trackStatistics() const override {
9148     STATS_DECLTRACK_CS_ATTR(potential_values)
9149   }
9150 };
9151 
9152 struct AAPotentialValuesCallSiteReturned
9153     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9154   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9155       : AACallSiteReturnedFromReturned<AAPotentialValues,
9156                                        AAPotentialValuesImpl>(IRP, A) {}
9157 
9158   /// See AbstractAttribute::trackStatistics()
9159   void trackStatistics() const override {
9160     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9161   }
9162 };
9163 
9164 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9165   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9166       : AAPotentialValuesFloating(IRP, A) {}
9167 
9168   /// See AbstractAttribute::initialize(..).
9169   void initialize(Attributor &A) override {
9170     AAPotentialValuesImpl::initialize(A);
9171     if (isAtFixpoint())
9172       return;
9173 
9174     Value &V = getAssociatedValue();
9175 
9176     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9177       unionAssumed(C->getValue());
9178       indicateOptimisticFixpoint();
9179       return;
9180     }
9181 
9182     if (isa<UndefValue>(&V)) {
9183       unionAssumedWithUndef();
9184       indicateOptimisticFixpoint();
9185       return;
9186     }
9187   }
9188 
9189   /// See AbstractAttribute::updateImpl(...).
9190   ChangeStatus updateImpl(Attributor &A) override {
9191     Value &V = getAssociatedValue();
9192     auto AssumedBefore = getAssumed();
9193     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9194                                              DepClassTy::REQUIRED);
9195     const auto &S = AA.getAssumed();
9196     unionAssumed(S);
9197     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9198                                          : ChangeStatus::CHANGED;
9199   }
9200 
9201   /// See AbstractAttribute::trackStatistics()
9202   void trackStatistics() const override {
9203     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9204   }
9205 };
9206 
9207 /// ------------------------ NoUndef Attribute ---------------------------------
9208 struct AANoUndefImpl : AANoUndef {
9209   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9210 
9211   /// See AbstractAttribute::initialize(...).
9212   void initialize(Attributor &A) override {
9213     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9214       indicateOptimisticFixpoint();
9215       return;
9216     }
9217     Value &V = getAssociatedValue();
9218     if (isa<UndefValue>(V))
9219       indicatePessimisticFixpoint();
9220     else if (isa<FreezeInst>(V))
9221       indicateOptimisticFixpoint();
9222     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9223              isGuaranteedNotToBeUndefOrPoison(&V))
9224       indicateOptimisticFixpoint();
9225     else
9226       AANoUndef::initialize(A);
9227   }
9228 
9229   /// See followUsesInMBEC
9230   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9231                        AANoUndef::StateType &State) {
9232     const Value *UseV = U->get();
9233     const DominatorTree *DT = nullptr;
9234     AssumptionCache *AC = nullptr;
9235     InformationCache &InfoCache = A.getInfoCache();
9236     if (Function *F = getAnchorScope()) {
9237       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9238       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9239     }
9240     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9241     bool TrackUse = false;
9242     // Track use for instructions which must produce undef or poison bits when
9243     // at least one operand contains such bits.
9244     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9245       TrackUse = true;
9246     return TrackUse;
9247   }
9248 
9249   /// See AbstractAttribute::getAsStr().
9250   const std::string getAsStr() const override {
9251     return getAssumed() ? "noundef" : "may-undef-or-poison";
9252   }
9253 
9254   ChangeStatus manifest(Attributor &A) override {
9255     // We don't manifest noundef attribute for dead positions because the
9256     // associated values with dead positions would be replaced with undef
9257     // values.
9258     bool UsedAssumedInformation = false;
9259     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9260                         UsedAssumedInformation))
9261       return ChangeStatus::UNCHANGED;
9262     // A position whose simplified value does not have any value is
9263     // considered to be dead. We don't manifest noundef in such positions for
9264     // the same reason above.
9265     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9266              .hasValue())
9267       return ChangeStatus::UNCHANGED;
9268     return AANoUndef::manifest(A);
9269   }
9270 };
9271 
9272 struct AANoUndefFloating : public AANoUndefImpl {
9273   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9274       : AANoUndefImpl(IRP, A) {}
9275 
9276   /// See AbstractAttribute::initialize(...).
9277   void initialize(Attributor &A) override {
9278     AANoUndefImpl::initialize(A);
9279     if (!getState().isAtFixpoint())
9280       if (Instruction *CtxI = getCtxI())
9281         followUsesInMBEC(*this, A, getState(), *CtxI);
9282   }
9283 
9284   /// See AbstractAttribute::updateImpl(...).
9285   ChangeStatus updateImpl(Attributor &A) override {
9286     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9287                             AANoUndef::StateType &T, bool Stripped) -> bool {
9288       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9289                                              DepClassTy::REQUIRED);
9290       if (!Stripped && this == &AA) {
9291         T.indicatePessimisticFixpoint();
9292       } else {
9293         const AANoUndef::StateType &S =
9294             static_cast<const AANoUndef::StateType &>(AA.getState());
9295         T ^= S;
9296       }
9297       return T.isValidState();
9298     };
9299 
9300     StateType T;
9301     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9302                                           VisitValueCB, getCtxI()))
9303       return indicatePessimisticFixpoint();
9304 
9305     return clampStateAndIndicateChange(getState(), T);
9306   }
9307 
9308   /// See AbstractAttribute::trackStatistics()
9309   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9310 };
9311 
9312 struct AANoUndefReturned final
9313     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9314   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9315       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9316 
9317   /// See AbstractAttribute::trackStatistics()
9318   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9319 };
9320 
9321 struct AANoUndefArgument final
9322     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9323   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9324       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9325 
9326   /// See AbstractAttribute::trackStatistics()
9327   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9328 };
9329 
9330 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9331   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9332       : AANoUndefFloating(IRP, A) {}
9333 
9334   /// See AbstractAttribute::trackStatistics()
9335   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9336 };
9337 
9338 struct AANoUndefCallSiteReturned final
9339     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9340   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9341       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9342 
9343   /// See AbstractAttribute::trackStatistics()
9344   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9345 };
9346 
9347 struct AACallEdgesFunction : public AACallEdges {
9348   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9349       : AACallEdges(IRP, A) {}
9350 
9351   /// See AbstractAttribute::updateImpl(...).
9352   ChangeStatus updateImpl(Attributor &A) override {
9353     ChangeStatus Change = ChangeStatus::UNCHANGED;
9354     bool OldHasUnknownCallee = HasUnknownCallee;
9355     bool OldHasUnknownCalleeNonAsm = HasUnknownCalleeNonAsm;
9356 
9357     auto AddCalledFunction = [&](Function *Fn) {
9358       if (CalledFunctions.insert(Fn)) {
9359         Change = ChangeStatus::CHANGED;
9360         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9361                           << "\n");
9362       }
9363     };
9364 
9365     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9366                           bool Stripped) -> bool {
9367       if (Function *Fn = dyn_cast<Function>(&V)) {
9368         AddCalledFunction(Fn);
9369       } else {
9370         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9371         HasUnknown = true;
9372         HasUnknownCalleeNonAsm = true;
9373       }
9374 
9375       // Explore all values.
9376       return true;
9377     };
9378 
9379     // Process any value that we might call.
9380     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
9381       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9382                                        HasUnknownCallee, VisitValue, nullptr,
9383                                        false)) {
9384         // If we haven't gone through all values, assume that there are unknown
9385         // callees.
9386         HasUnknownCallee = true;
9387         HasUnknownCalleeNonAsm = true;
9388       }
9389     };
9390 
9391     auto ProcessCallInst = [&](Instruction &Inst) {
9392       CallBase &CB = static_cast<CallBase &>(Inst);
9393       if (CB.isInlineAsm()) {
9394         HasUnknownCallee = true;
9395         return true;
9396       }
9397 
9398       // Process callee metadata if available.
9399       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
9400         for (auto &Op : MD->operands()) {
9401           Function *Callee = mdconst::extract_or_null<Function>(Op);
9402           if (Callee)
9403             AddCalledFunction(Callee);
9404         }
9405         // Callees metadata grantees that the called function is one of its
9406         // operands, So we are done.
9407         return true;
9408       }
9409 
9410       // The most simple case.
9411       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
9412 
9413       // Process callback functions.
9414       SmallVector<const Use *, 4u> CallbackUses;
9415       AbstractCallSite::getCallbackUses(CB, CallbackUses);
9416       for (const Use *U : CallbackUses)
9417         ProcessCalledOperand(U->get(), &Inst);
9418 
9419       return true;
9420     };
9421 
9422     // Visit all callable instructions.
9423     bool UsedAssumedInformation = false;
9424     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9425                                            UsedAssumedInformation)) {
9426       // If we haven't looked at all call like instructions, assume that there
9427       // are unknown callees.
9428       HasUnknownCallee = true;
9429       HasUnknownCalleeNonAsm = true;
9430     }
9431 
9432     // Track changes.
9433     if (OldHasUnknownCallee != HasUnknownCallee ||
9434         OldHasUnknownCalleeNonAsm != HasUnknownCalleeNonAsm)
9435       Change = ChangeStatus::CHANGED;
9436 
9437     return Change;
9438   }
9439 
9440   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9441     return CalledFunctions;
9442   };
9443 
9444   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9445 
9446   virtual bool hasNonAsmUnknownCallee() const override {
9447     return HasUnknownCalleeNonAsm;
9448   }
9449 
9450   const std::string getAsStr() const override {
9451     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9452            std::to_string(CalledFunctions.size()) + "]";
9453   }
9454 
9455   void trackStatistics() const override {}
9456 
9457   /// Optimistic set of functions that might be called by this function.
9458   SetVector<Function *> CalledFunctions;
9459 
9460   /// Is there any call with a unknown callee.
9461   bool HasUnknownCallee = false;
9462 
9463   /// Is there any call with a unknown callee, excluding any inline asm.
9464   bool HasUnknownCalleeNonAsm = false;
9465 };
9466 
9467 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9468   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9469       : AAFunctionReachability(IRP, A) {}
9470 
9471   bool canReach(Attributor &A, Function *Fn) const override {
9472     // Assume that we can reach any function if we can reach a call with
9473     // unknown callee.
9474     if (CanReachUnknownCallee)
9475       return true;
9476 
9477     if (ReachableQueries.count(Fn))
9478       return true;
9479 
9480     if (UnreachableQueries.count(Fn))
9481       return false;
9482 
9483     const AACallEdges &AAEdges =
9484         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9485 
9486     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9487     bool Result = checkIfReachable(A, Edges, Fn);
9488 
9489     // Attributor returns attributes as const, so this function has to be
9490     // const for users of this attribute to use it without having to do
9491     // a const_cast.
9492     // This is a hack for us to be able to cache queries.
9493     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9494 
9495     if (Result)
9496       NonConstThis->ReachableQueries.insert(Fn);
9497     else
9498       NonConstThis->UnreachableQueries.insert(Fn);
9499 
9500     return Result;
9501   }
9502 
9503   /// See AbstractAttribute::updateImpl(...).
9504   ChangeStatus updateImpl(Attributor &A) override {
9505     if (CanReachUnknownCallee)
9506       return ChangeStatus::UNCHANGED;
9507 
9508     const AACallEdges &AAEdges =
9509         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9510     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9511     ChangeStatus Change = ChangeStatus::UNCHANGED;
9512 
9513     if (AAEdges.hasUnknownCallee()) {
9514       bool OldCanReachUnknown = CanReachUnknownCallee;
9515       CanReachUnknownCallee = true;
9516       return OldCanReachUnknown ? ChangeStatus::UNCHANGED
9517                                 : ChangeStatus::CHANGED;
9518     }
9519 
9520     // Check if any of the unreachable functions become reachable.
9521     for (auto Current = UnreachableQueries.begin();
9522          Current != UnreachableQueries.end();) {
9523       if (!checkIfReachable(A, Edges, *Current)) {
9524         Current++;
9525         continue;
9526       }
9527       ReachableQueries.insert(*Current);
9528       UnreachableQueries.erase(*Current++);
9529       Change = ChangeStatus::CHANGED;
9530     }
9531 
9532     return Change;
9533   }
9534 
9535   const std::string getAsStr() const override {
9536     size_t QueryCount = ReachableQueries.size() + UnreachableQueries.size();
9537 
9538     return "FunctionReachability [" + std::to_string(ReachableQueries.size()) +
9539            "," + std::to_string(QueryCount) + "]";
9540   }
9541 
9542   void trackStatistics() const override {}
9543 
9544 private:
9545   bool canReachUnknownCallee() const override { return CanReachUnknownCallee; }
9546 
9547   bool checkIfReachable(Attributor &A, const SetVector<Function *> &Edges,
9548                         Function *Fn) const {
9549     if (Edges.count(Fn))
9550       return true;
9551 
9552     for (Function *Edge : Edges) {
9553       // We don't need a dependency if the result is reachable.
9554       const AAFunctionReachability &EdgeReachability =
9555           A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Edge),
9556                                              DepClassTy::NONE);
9557 
9558       if (EdgeReachability.canReach(A, Fn))
9559         return true;
9560     }
9561     for (Function *Fn : Edges)
9562       A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Fn),
9563                                          DepClassTy::REQUIRED);
9564 
9565     return false;
9566   }
9567 
9568   /// Set of functions that we know for sure is reachable.
9569   SmallPtrSet<Function *, 8> ReachableQueries;
9570 
9571   /// Set of functions that are unreachable, but might become reachable.
9572   SmallPtrSet<Function *, 8> UnreachableQueries;
9573 
9574   /// If we can reach a function with a call to a unknown function we assume
9575   /// that we can reach any function.
9576   bool CanReachUnknownCallee = false;
9577 };
9578 
9579 } // namespace
9580 
9581 AACallGraphNode *AACallEdgeIterator::operator*() const {
9582   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9583       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9584 }
9585 
9586 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9587 
9588 const char AAReturnedValues::ID = 0;
9589 const char AANoUnwind::ID = 0;
9590 const char AANoSync::ID = 0;
9591 const char AANoFree::ID = 0;
9592 const char AANonNull::ID = 0;
9593 const char AANoRecurse::ID = 0;
9594 const char AAWillReturn::ID = 0;
9595 const char AAUndefinedBehavior::ID = 0;
9596 const char AANoAlias::ID = 0;
9597 const char AAReachability::ID = 0;
9598 const char AANoReturn::ID = 0;
9599 const char AAIsDead::ID = 0;
9600 const char AADereferenceable::ID = 0;
9601 const char AAAlign::ID = 0;
9602 const char AANoCapture::ID = 0;
9603 const char AAValueSimplify::ID = 0;
9604 const char AAHeapToStack::ID = 0;
9605 const char AAPrivatizablePtr::ID = 0;
9606 const char AAMemoryBehavior::ID = 0;
9607 const char AAMemoryLocation::ID = 0;
9608 const char AAValueConstantRange::ID = 0;
9609 const char AAPotentialValues::ID = 0;
9610 const char AANoUndef::ID = 0;
9611 const char AACallEdges::ID = 0;
9612 const char AAFunctionReachability::ID = 0;
9613 const char AAPointerInfo::ID = 0;
9614 
9615 // Macro magic to create the static generator function for attributes that
9616 // follow the naming scheme.
9617 
9618 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9619   case IRPosition::PK:                                                         \
9620     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9621 
9622 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9623   case IRPosition::PK:                                                         \
9624     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9625     ++NumAAs;                                                                  \
9626     break;
9627 
9628 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9629   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9630     CLASS *AA = nullptr;                                                       \
9631     switch (IRP.getPositionKind()) {                                           \
9632       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9633       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9634       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9635       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9636       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9637       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9638       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9639       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9640     }                                                                          \
9641     return *AA;                                                                \
9642   }
9643 
9644 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9645   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9646     CLASS *AA = nullptr;                                                       \
9647     switch (IRP.getPositionKind()) {                                           \
9648       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9649       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9650       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9651       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9652       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9653       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9654       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9655       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9656     }                                                                          \
9657     return *AA;                                                                \
9658   }
9659 
9660 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9661   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9662     CLASS *AA = nullptr;                                                       \
9663     switch (IRP.getPositionKind()) {                                           \
9664       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9665       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9666       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9667       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9668       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9669       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9670       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9671       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9672     }                                                                          \
9673     return *AA;                                                                \
9674   }
9675 
9676 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9677   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9678     CLASS *AA = nullptr;                                                       \
9679     switch (IRP.getPositionKind()) {                                           \
9680       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9681       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9682       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9683       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9684       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9685       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9686       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9687       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9688     }                                                                          \
9689     return *AA;                                                                \
9690   }
9691 
9692 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9693   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9694     CLASS *AA = nullptr;                                                       \
9695     switch (IRP.getPositionKind()) {                                           \
9696       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9697       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9698       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9699       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9700       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9701       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9702       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9703       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9704     }                                                                          \
9705     return *AA;                                                                \
9706   }
9707 
9708 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9709 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9710 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9711 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9712 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9713 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9714 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9715 
9716 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9717 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9718 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9719 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9720 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9721 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9722 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9723 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9724 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9725 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9726 
9727 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9728 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9729 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9730 
9731 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9732 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9733 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9734 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9735 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9736 
9737 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9738 
9739 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9740 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9741 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9742 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9743 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9744 #undef SWITCH_PK_CREATE
9745 #undef SWITCH_PK_INV
9746