//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
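
// For illustration, STATS_DECLTRACK_ARG_ATTR(returned) expands (roughly) to:
//
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++NumIRArguments_returned;
//   }
//
// i.e., the statistic is declared at the tracking site and incremented in the
// same spot.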

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
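
// For illustration (not from the source), assuming a hypothetical type
// %S = type { i32, i32, [4 x i32] } and a call
// constructPointer(ResTy, %p, /* Offset */ 8, ...) with %p of type %S*, the
// loop above first steps through the pointer (index 0, element size 24
// bytes), then selects the struct element containing offset 8 (index 2),
// leaving no remainder. The emitted IR is roughly:
//
//   %p.0.2 = getelementptr %S, %S* %p, i32 0, i32 2
//   %p.0.2.cast = bitcast [4 x i32]* %p.0.2 to <ResTy>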

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
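
// For illustration, for
//
//   %sel = select i1 %c, i8* %a, i8* %b
//
// a traversal starting at %sel pushes %a and %b onto the worklist and invokes
// \p VisitValueCB for each of them with the "stripped" flag set, since the
// select was looked through.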

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return whether \p S changed (as in: the
/// update needs to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values, and we want
  // to join (IntegerState::operator&) the states of all the return values
  // there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class to compose two generic deductions.
template <typename AAType, typename Base, typename StateType,
          template <typename...> class F, template <typename...> class G>
struct AAComposeTwoGenericDeduction
    : public F<AAType, G<AAType, Base, StateType>, StateType> {
  AAComposeTwoGenericDeduction(const IRPosition &IRP)
      : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}

  void initialize(Attributor &A) override {
    F<AAType, G<AAType, Base, StateType>, StateType>::initialize(A);
    G<AAType, Base, StateType>::initialize(A);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus ChangedF =
        F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
    ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
    return ChangedF | ChangedG;
  }
};

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename Base::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites, and we want
  // to join (IntegerState::operator&) the states of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base,
          typename StateType = typename Base::StateType>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const StateType &>(AA.getState()));
  }
};

/// Helper class for generic deduction using must-be-executed-context.
/// The base class is required to have a `followUse` method:
///
///   bool followUse(Attributor &A, const Use *U, const Instruction *I,
///                  StateType &State)
///
/// where \p U is the underlying use, \p I is the user of \p U, and \p State
/// is the state to update. `followUse` returns true if the value should be
/// tracked transitively.
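///
/// A minimal sketch (hypothetical, for illustration only) of what a
/// conforming `followUse` could look like in a base class:
///
///   bool followUse(Attributor &A, const Use *U, const Instruction *I,
///                  StateType &State) {
///     // A load executed in the must-be-executed context tells us something
///     // about the pointer; update \p State accordingly and return true to
///     // also track the users of \p I transitively.
///     return isa<LoadInst>(I);
///   }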
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAFromMustBeExecutedContext : public Base {
  AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}

  void initialize(Attributor &A) override {
    Base::initialize(A);
    const IRPosition &IRP = this->getIRPosition();
    Instruction *CtxI = IRP.getCtxI();

    if (!CtxI)
      return;

    for (const Use &U : IRP.getAssociatedValue().uses())
      Uses.insert(&U);
  }

  /// Helper function to accumulate uses.
  void followUsesInContext(Attributor &A,
                           MustBeExecutedContextExplorer &Explorer,
                           const Instruction *CtxI,
                           SetVector<const Use *> &Uses, StateType &State) {
    auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
    for (unsigned u = 0; u < Uses.size(); ++u) {
      const Use *U = Uses[u];
      if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
        bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
        if (Found && Base::followUse(A, U, UserI, State))
          for (const Use &Us : UserI->uses())
            Uses.insert(&Us);
      }
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto BeforeState = this->getState();
    auto &S = this->getState();
    Instruction *CtxI = this->getIRPosition().getCtxI();
    if (!CtxI)
      return ChangeStatus::UNCHANGED;

    MustBeExecutedContextExplorer &Explorer =
        A.getInfoCache().getMustBeExecutedContextExplorer();

    followUsesInContext(A, Explorer, CtxI, Uses, S);

    if (this->isAtFixpoint())
      return ChangeStatus::CHANGED;

    SmallVector<const BranchInst *, 4> BrInsts;
    auto Pred = [&](const Instruction *I) {
      if (const BranchInst *Br = dyn_cast<BranchInst>(I))
        if (Br->isConditional())
          BrInsts.push_back(Br);
      return true;
    };

    // Here, accumulate conditional branch instructions in the context. We
    // explore the child paths and collect the known states. The disjunction
    // of those states is then merged into our own state. Let ParentS_i be the
    // state known for the i-th branch instruction in the context; the
    // ChildS_{i, j} are the states derived for its successors respectively.
    //
    // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
    // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
    //      ...
    // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
    //
    // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
    //
    // FIXME: Currently, recursive branches are not handled. For example, we
    // can't deduce that ptr must be dereferenced in the function below.
    //
    // void f(int a, int b, int *ptr) {
    //    if (a)
    //      if (b) {
    //        *ptr = 0;
    //      } else {
    //        *ptr = 1;
    //      }
    //    else {
    //      if (b) {
    //        *ptr = 0;
    //      } else {
    //        *ptr = 1;
    //      }
    //    }
    // }

    Explorer.checkForAllContext(CtxI, Pred);
    for (const BranchInst *Br : BrInsts) {
      StateType ParentState;

      // The known state of the parent is a conjunction of the children's
      // known states, so it is initialized to the best state.
      ParentState.indicateOptimisticFixpoint();

      for (const BasicBlock *BB : Br->successors()) {
        StateType ChildState;

        size_t BeforeSize = Uses.size();
        followUsesInContext(A, Explorer, &BB->front(), Uses, ChildState);

        // Erase uses which only appear in the child.
        for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
          It = Uses.erase(It);

        ParentState &= ChildState;
      }

      // Use only known state.
      S += ParentState;
    }

    return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
  }

private:
  /// Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
};

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AAArgumentFromCallSiteArguments>;

template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
    AAComposeTwoGenericDeduction<AAType, Base, StateType,
                                 AAFromMustBeExecutedContext,
                                 AACallSiteReturnedFromReturned>;

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
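///
/// For illustration, for
///
///   define i32 @f(i32 %x) {
///     ret i32 %x
///   }
///
/// the unique returned value is the argument %x, and manifesting results in
///
///   define i32 @f(i32 returned %x)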
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// llvm::None.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0)
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    // TODO: This should be handled differently!
    this->AnchorVal = UniqueRVArg;
    this->KindOrArgNo = UniqueRVArg->getArgNo();
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends; if we do not know anything about the returned call, we
    // mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept a record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is a
  /// non-relaxed atomic, i.e., an atomic instruction whose ordering is
  /// neither unordered nor monotonic.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if a non-volatile mem* intrinsic (memcpy,
  /// memmove, memset) is nosync.
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed; otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};
1371 
1372 /// ------------------------ No-Free Attributes ----------------------------
1373 
1374 struct AANoFreeImpl : public AANoFree {
1375   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1376 
1377   /// See AbstractAttribute::updateImpl(...).
1378   ChangeStatus updateImpl(Attributor &A) override {
1379     auto CheckForNoFree = [&](Instruction &I) {
1380       const auto &CB = cast<CallBase>(I);
1381       if (CB.hasFnAttr(Attribute::NoFree))
1382         return true;
1383 
1384       const auto &NoFreeAA =
1385           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1386       return NoFreeAA.isAssumedNoFree();
1387     };
1388 
1389     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1390       return indicatePessimisticFixpoint();
1391     return ChangeStatus::UNCHANGED;
1392   }
1393 
1394   /// See AbstractAttribute::getAsStr().
1395   const std::string getAsStr() const override {
1396     return getAssumed() ? "nofree" : "may-free";
1397   }
1398 };
1399 
1400 struct AANoFreeFunction final : public AANoFreeImpl {
1401   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1402 
1403   /// See AbstractAttribute::trackStatistics()
1404   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1405 };
1406 
/// NoFree attribute deduction for a call site.
1408 struct AANoFreeCallSite final : AANoFreeImpl {
1409   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1410 
1411   /// See AbstractAttribute::initialize(...).
1412   void initialize(Attributor &A) override {
1413     AANoFreeImpl::initialize(A);
1414     Function *F = getAssociatedFunction();
1415     if (!F)
1416       indicatePessimisticFixpoint();
1417   }
1418 
1419   /// See AbstractAttribute::updateImpl(...).
1420   ChangeStatus updateImpl(Attributor &A) override {
1421     // TODO: Once we have call site specific value information we can provide
1422     //       call site specific liveness information and then it makes
1423     //       sense to specialize attributes for call sites arguments instead of
1424     //       redirecting requests to the callee argument.
1425     Function *F = getAssociatedFunction();
1426     const IRPosition &FnPos = IRPosition::function(*F);
1427     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1428     return clampStateAndIndicateChange(
1429         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1430   }
1431 
1432   /// See AbstractAttribute::trackStatistics()
1433   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1434 };
1435 
1436 /// NoFree attribute for floating values.
1437 struct AANoFreeFloating : AANoFreeImpl {
1438   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1439 
1440   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
1442 
  /// See AbstractAttribute::updateImpl(...).
1444   ChangeStatus updateImpl(Attributor &A) override {
1445     const IRPosition &IRP = getIRPosition();
1446 
1447     const auto &NoFreeAA =
1448         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1449     if (NoFreeAA.isAssumedNoFree())
1450       return ChangeStatus::UNCHANGED;
1451 
1452     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1453     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1454       Instruction *UserI = cast<Instruction>(U.getUser());
1455       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1456         if (CB->isBundleOperand(&U))
1457           return false;
1458         if (!CB->isArgOperand(&U))
1459           return true;
1460         unsigned ArgNo = CB->getArgOperandNo(&U);
1461 
1462         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1463             *this, IRPosition::callsite_argument(*CB, ArgNo));
1464         return NoFreeArg.isAssumedNoFree();
1465       }
1466 
1467       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1468           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1469         Follow = true;
1470         return true;
1471       }
1472       if (isa<ReturnInst>(UserI))
1473         return true;
1474 
1475       // Unknown user.
1476       return false;
1477     };
1478     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1479       return indicatePessimisticFixpoint();
1480 
1481     return ChangeStatus::UNCHANGED;
1482   }
1483 };
1484 
/// NoFree attribute for a function argument.
1486 struct AANoFreeArgument final : AANoFreeFloating {
1487   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1488 
1489   /// See AbstractAttribute::trackStatistics()
1490   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1491 };
1492 
/// NoFree attribute for a call site argument.
1494 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1495   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1496 
1497   /// See AbstractAttribute::updateImpl(...).
1498   ChangeStatus updateImpl(Attributor &A) override {
1499     // TODO: Once we have call site specific value information we can provide
1500     //       call site specific liveness information and then it makes
1501     //       sense to specialize attributes for call sites arguments instead of
1502     //       redirecting requests to the callee argument.
1503     Argument *Arg = getAssociatedArgument();
1504     if (!Arg)
1505       return indicatePessimisticFixpoint();
1506     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1507     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1508     return clampStateAndIndicateChange(
1509         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1510   }
1511 
1512   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
1514 };
1515 
1516 /// NoFree attribute for function return value.
1517 struct AANoFreeReturned final : AANoFreeFloating {
1518   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1519     llvm_unreachable("NoFree is not applicable to function returns!");
1520   }
1521 
1522   /// See AbstractAttribute::initialize(...).
1523   void initialize(Attributor &A) override {
1524     llvm_unreachable("NoFree is not applicable to function returns!");
1525   }
1526 
1527   /// See AbstractAttribute::updateImpl(...).
1528   ChangeStatus updateImpl(Attributor &A) override {
1529     llvm_unreachable("NoFree is not applicable to function returns!");
1530   }
1531 
1532   /// See AbstractAttribute::trackStatistics()
1533   void trackStatistics() const override {}
1534 };
1535 
1536 /// NoFree attribute deduction for a call site return value.
1537 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1538   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1539 
1540   ChangeStatus manifest(Attributor &A) override {
1541     return ChangeStatus::UNCHANGED;
1542   }
1543   /// See AbstractAttribute::trackStatistics()
1544   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1545 };
1546 
1547 /// ------------------------ NonNull Argument Attribute ------------------------
1548 static int64_t getKnownNonNullAndDerefBytesForUse(
1549     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1550     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1551   TrackUse = false;
1552 
1553   const Value *UseV = U->get();
1554   if (!UseV->getType()->isPointerTy())
1555     return 0;
1556 
1557   Type *PtrTy = UseV->getType();
1558   const Function *F = I->getFunction();
1559   bool NullPointerIsDefined =
1560       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1561   const DataLayout &DL = A.getInfoCache().getDL();
1562   if (const auto *CB = dyn_cast<CallBase>(I)) {
1563     if (CB->isBundleOperand(U))
1564       return 0;
1565 
1566     if (CB->isCallee(U)) {
1567       IsNonNull |= !NullPointerIsDefined;
1568       return 0;
1569     }
1570 
1571     unsigned ArgNo = CB->getArgOperandNo(U);
1572     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1573     // As long as we only use known information there is no need to track
1574     // dependences here.
1575     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1576                                                   /* TrackDependence */ false);
1577     IsNonNull |= DerefAA.isKnownNonNull();
1578     return DerefAA.getKnownDereferenceableBytes();
1579   }
1580 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We try to be smart and, for now, avoid looking through things
  // we do not like, e.g., non-inbounds GEPs.
1584   if (isa<CastInst>(I)) {
1585     TrackUse = true;
1586     return 0;
1587   }
1588   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1589     if (GEP->hasAllConstantIndices()) {
1590       TrackUse = true;
1591       return 0;
1592     }
1593 
1594   int64_t Offset;
1595   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1596     if (Base == &AssociatedValue &&
1597         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1598       int64_t DerefBytes =
1599           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1600 
1601       IsNonNull |= !NullPointerIsDefined;
1602       return std::max(int64_t(0), DerefBytes);
1603     }
1604   }
1605 
  // Corner case when the offset is 0.
1607   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1608           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1609     if (Offset == 0 && Base == &AssociatedValue &&
1610         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1611       int64_t DerefBytes =
1612           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1613       IsNonNull |= !NullPointerIsDefined;
1614       return std::max(int64_t(0), DerefBytes);
1615     }
1616   }
1617 
1618   return 0;
1619 }
1620 
1621 struct AANonNullImpl : AANonNull {
1622   AANonNullImpl(const IRPosition &IRP)
1623       : AANonNull(IRP),
1624         NullIsDefined(NullPointerIsDefined(
1625             getAnchorScope(),
1626             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1627 
1628   /// See AbstractAttribute::initialize(...).
1629   void initialize(Attributor &A) override {
1630     if (!NullIsDefined &&
1631         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1632                 /* IgnoreSubsumingPositions */ false, &A))
1633       indicateOptimisticFixpoint();
1634     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1635       indicatePessimisticFixpoint();
1636     else
1637       AANonNull::initialize(A);
1638   }
1639 
1640   /// See AAFromMustBeExecutedContext
1641   bool followUse(Attributor &A, const Use *U, const Instruction *I,
1642                  AANonNull::StateType &State) {
1643     bool IsNonNull = false;
1644     bool TrackUse = false;
1645     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1646                                        IsNonNull, TrackUse);
1647     State.setKnown(IsNonNull);
1648     return TrackUse;
1649   }
1650 
1651   /// See AbstractAttribute::getAsStr().
1652   const std::string getAsStr() const override {
1653     return getAssumed() ? "nonnull" : "may-null";
1654   }
1655 
1656   /// Flag to determine if the underlying value can be null and still allow
1657   /// valid accesses.
1658   const bool NullIsDefined;
1659 };
1660 
1661 /// NonNull attribute for a floating value.
1662 struct AANonNullFloating
1663     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1664   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1665   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1666 
1667   /// See AbstractAttribute::updateImpl(...).
1668   ChangeStatus updateImpl(Attributor &A) override {
1669     ChangeStatus Change = Base::updateImpl(A);
1670     if (isKnownNonNull())
1671       return Change;
1672 
1673     if (!NullIsDefined) {
1674       const auto &DerefAA =
1675           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1676       if (DerefAA.getAssumedDereferenceableBytes())
1677         return Change;
1678     }
1679 
1680     const DataLayout &DL = A.getDataLayout();
1681 
1682     DominatorTree *DT = nullptr;
1683     AssumptionCache *AC = nullptr;
1684     InformationCache &InfoCache = A.getInfoCache();
1685     if (const Function *Fn = getAnchorScope()) {
1686       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1687       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1688     }
1689 
1690     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1691                             AANonNull::StateType &T, bool Stripped) -> bool {
1692       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1693       if (!Stripped && this == &AA) {
1694         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1695           T.indicatePessimisticFixpoint();
1696       } else {
1697         // Use abstract attribute information.
1698         const AANonNull::StateType &NS =
1699             static_cast<const AANonNull::StateType &>(AA.getState());
1700         T ^= NS;
1701       }
1702       return T.isValidState();
1703     };
1704 
1705     StateType T;
1706     if (!genericValueTraversal<AANonNull, StateType>(
1707             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1708       return indicatePessimisticFixpoint();
1709 
1710     return clampStateAndIndicateChange(getState(), T);
1711   }
1712 
1713   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1715 };
1716 
1717 /// NonNull attribute for function return value.
1718 struct AANonNullReturned final
1719     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1720   AANonNullReturned(const IRPosition &IRP)
1721       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1722 
1723   /// See AbstractAttribute::trackStatistics()
1724   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1725 };
1726 
1727 /// NonNull attribute for function argument.
1728 struct AANonNullArgument final
1729     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1730                                                               AANonNullImpl> {
1731   AANonNullArgument(const IRPosition &IRP)
1732       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1733                                                                 AANonNullImpl>(
1734             IRP) {}
1735 
1736   /// See AbstractAttribute::trackStatistics()
1737   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1738 };
1739 
1740 struct AANonNullCallSiteArgument final : AANonNullFloating {
1741   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1742 
1743   /// See AbstractAttribute::trackStatistics()
1744   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1745 };
1746 
1747 /// NonNull attribute for a call site return position.
1748 struct AANonNullCallSiteReturned final
1749     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1750                                                              AANonNullImpl> {
1751   AANonNullCallSiteReturned(const IRPosition &IRP)
1752       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1753                                                                AANonNullImpl>(
1754             IRP) {}
1755 
1756   /// See AbstractAttribute::trackStatistics()
1757   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1758 };
1759 
1760 /// ------------------------ No-Recurse Attributes ----------------------------
1761 
1762 struct AANoRecurseImpl : public AANoRecurse {
1763   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1764 
1765   /// See AbstractAttribute::getAsStr()
1766   const std::string getAsStr() const override {
1767     return getAssumed() ? "norecurse" : "may-recurse";
1768   }
1769 };
1770 
1771 struct AANoRecurseFunction final : AANoRecurseImpl {
1772   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1773 
1774   /// See AbstractAttribute::initialize(...).
1775   void initialize(Attributor &A) override {
1776     AANoRecurseImpl::initialize(A);
1777     if (const Function *F = getAnchorScope())
1778       if (A.getInfoCache().getSccSize(*F) != 1)
1779         indicatePessimisticFixpoint();
1780   }
1781 
1782   /// See AbstractAttribute::updateImpl(...).
1783   ChangeStatus updateImpl(Attributor &A) override {
1784 
1785     // If all live call sites are known to be no-recurse, we are as well.
1786     auto CallSitePred = [&](AbstractCallSite ACS) {
1787       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1788           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1789           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1790       return NoRecurseAA.isKnownNoRecurse();
1791     };
1792     bool AllCallSitesKnown;
1793     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1794       // If we know all call sites and all are known no-recurse, we are done.
1795       // If all known call sites, which might not be all that exist, are known
1796       // to be no-recurse, we are not done but we can continue to assume
1797       // no-recurse. If one of the call sites we have not visited will become
1798       // live, another update is triggered.
1799       if (AllCallSitesKnown)
1800         indicateOptimisticFixpoint();
1801       return ChangeStatus::UNCHANGED;
1802     }
1803 
1804     // If the above check does not hold anymore we look at the calls.
1805     auto CheckForNoRecurse = [&](Instruction &I) {
1806       const auto &CB = cast<CallBase>(I);
1807       if (CB.hasFnAttr(Attribute::NoRecurse))
1808         return true;
1809 
1810       const auto &NoRecurseAA =
1811           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1812       if (!NoRecurseAA.isAssumedNoRecurse())
1813         return false;
1814 
1815       // Recursion to the same function
1816       if (CB.getCalledFunction() == getAnchorScope())
1817         return false;
1818 
1819       return true;
1820     };
1821 
1822     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1823       return indicatePessimisticFixpoint();
1824     return ChangeStatus::UNCHANGED;
1825   }
1826 
1827   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1828 };
1829 
/// NoRecurse attribute deduction for a call site.
1831 struct AANoRecurseCallSite final : AANoRecurseImpl {
1832   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1833 
1834   /// See AbstractAttribute::initialize(...).
1835   void initialize(Attributor &A) override {
1836     AANoRecurseImpl::initialize(A);
1837     Function *F = getAssociatedFunction();
1838     if (!F)
1839       indicatePessimisticFixpoint();
1840   }
1841 
1842   /// See AbstractAttribute::updateImpl(...).
1843   ChangeStatus updateImpl(Attributor &A) override {
1844     // TODO: Once we have call site specific value information we can provide
1845     //       call site specific liveness information and then it makes
1846     //       sense to specialize attributes for call sites arguments instead of
1847     //       redirecting requests to the callee argument.
1848     Function *F = getAssociatedFunction();
1849     const IRPosition &FnPos = IRPosition::function(*F);
1850     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1851     return clampStateAndIndicateChange(
1852         getState(),
1853         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1854   }
1855 
1856   /// See AbstractAttribute::trackStatistics()
1857   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1858 };
1859 
1860 /// -------------------- Undefined-Behavior Attributes ------------------------
1861 
1862 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1863   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
1864 
1865   /// See AbstractAttribute::updateImpl(...).
1867   ChangeStatus updateImpl(Attributor &A) override {
1868     const size_t UBPrevSize = KnownUBInsts.size();
1869     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1870 
1871     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1872       // Skip instructions that are already saved.
1873       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1874         return true;
1875 
1876       // If we reach here, we know we have an instruction
1877       // that accesses memory through a pointer operand,
1878       // for which getPointerOperand() should give it to us.
1879       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1880       assert(PtrOp &&
1881              "Expected pointer operand of memory accessing instruction");
1882 
1883       // Either we stopped and the appropriate action was taken,
1884       // or we got back a simplified value to continue.
1885       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1886       if (!SimplifiedPtrOp.hasValue())
1887         return true;
1888       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1889 
1890       // A memory access through a pointer is considered UB
1891       // only if the pointer has constant null value.
1892       // TODO: Expand it to not only check constant values.
1893       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1894         AssumedNoUBInsts.insert(&I);
1895         return true;
1896       }
1897       const Type *PtrTy = PtrOpVal->getType();
1898 
1899       // Because we only consider instructions inside functions,
1900       // assume that a parent function exists.
1901       const Function *F = I.getFunction();
1902 
1903       // A memory access using constant null pointer is only considered UB
1904       // if null pointer is _not_ defined for the target platform.
1905       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1906         AssumedNoUBInsts.insert(&I);
1907       else
1908         KnownUBInsts.insert(&I);
1909       return true;
1910     };
1911 
1912     auto InspectBrInstForUB = [&](Instruction &I) {
1913       // A conditional branch instruction is considered UB if it has `undef`
1914       // condition.
1915 
1916       // Skip instructions that are already saved.
1917       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1918         return true;
1919 
1920       // We know we have a branch instruction.
1921       auto BrInst = cast<BranchInst>(&I);
1922 
1923       // Unconditional branches are never considered UB.
1924       if (BrInst->isUnconditional())
1925         return true;
1926 
1927       // Either we stopped and the appropriate action was taken,
1928       // or we got back a simplified value to continue.
1929       Optional<Value *> SimplifiedCond =
1930           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1931       if (!SimplifiedCond.hasValue())
1932         return true;
1933       AssumedNoUBInsts.insert(&I);
1934       return true;
1935     };
1936 
1937     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1938                               {Instruction::Load, Instruction::Store,
1939                                Instruction::AtomicCmpXchg,
1940                                Instruction::AtomicRMW},
1941                               /* CheckBBLivenessOnly */ true);
1942     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1943                               /* CheckBBLivenessOnly */ true);
1944     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1945         UBPrevSize != KnownUBInsts.size())
1946       return ChangeStatus::CHANGED;
1947     return ChangeStatus::UNCHANGED;
1948   }
1949 
1950   bool isKnownToCauseUB(Instruction *I) const override {
1951     return KnownUBInsts.count(I);
1952   }
1953 
1954   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest is boilerplate to
    // ensure that it is one of the instructions we test for UB.
1960 
1961     switch (I->getOpcode()) {
1962     case Instruction::Load:
1963     case Instruction::Store:
1964     case Instruction::AtomicCmpXchg:
1965     case Instruction::AtomicRMW:
1966       return !AssumedNoUBInsts.count(I);
1967     case Instruction::Br: {
1968       auto BrInst = cast<BranchInst>(I);
1969       if (BrInst->isUnconditional())
1970         return false;
1971       return !AssumedNoUBInsts.count(I);
    }
1973     default:
1974       return false;
1975     }
1976     return false;
1977   }
1978 
1979   ChangeStatus manifest(Attributor &A) override {
1980     if (KnownUBInsts.empty())
1981       return ChangeStatus::UNCHANGED;
1982     for (Instruction *I : KnownUBInsts)
1983       A.changeToUnreachableAfterManifest(I);
1984     return ChangeStatus::CHANGED;
1985   }
1986 
1987   /// See AbstractAttribute::getAsStr()
1988   const std::string getAsStr() const override {
1989     return getAssumed() ? "undefined-behavior" : "no-ub";
1990   }
1991 
1992   /// Note: The correctness of this analysis depends on the fact that the
1993   /// following 2 sets will stop changing after some point.
1994   /// "Change" here means that their size changes.
1995   /// The size of each set is monotonically increasing
1996   /// (we only add items to them) and it is upper bounded by the number of
1997   /// instructions in the processed function (we can never save more
1998   /// elements in either set than this number). Hence, at some point,
1999   /// they will stop increasing.
2000   /// Consequently, at some point, both sets will have stopped
2001   /// changing, effectively making the analysis reach a fixpoint.
2002 
2003   /// Note: These 2 sets are disjoint and an instruction can be considered
2004   /// one of 3 things:
2005   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2006   ///    the KnownUBInsts set.
2007   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2008   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2010   ///    could not find a reason to assume or prove that it can cause UB,
2011   ///    hence it assumes it doesn't. We have a set for these instructions
2012   ///    so that we don't reprocess them in every update.
2013   ///    Note however that instructions in this set may cause UB.
2014 
2015 protected:
2016   /// A set of all live instructions _known_ to cause UB.
2017   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2018 
2019 private:
2020   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2021   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2022 
  // Should be called during updates in which we're processing an instruction
  // \p I that depends on a value \p V; one of the following has to happen:
2025   // - If the value is assumed, then stop.
2026   // - If the value is known but undef, then consider it UB.
2027   // - Otherwise, do specific processing with the simplified value.
2028   // We return None in the first 2 cases to signify that an appropriate
2029   // action was taken and the caller should stop.
2030   // Otherwise, we return the simplified value that the caller should
2031   // use for specific processing.
2032   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2033                                          Instruction *I) {
2034     const auto &ValueSimplifyAA =
2035         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2036     Optional<Value *> SimplifiedV =
2037         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2038     if (!ValueSimplifyAA.isKnown()) {
2039       // Don't depend on assumed values.
2040       return llvm::None;
2041     }
2042     if (!SimplifiedV.hasValue()) {
2043       // If it is known (which we tested above) but it doesn't have a value,
2044       // then we can assume `undef` and hence the instruction is UB.
2045       KnownUBInsts.insert(I);
2046       return llvm::None;
2047     }
2048     Value *Val = SimplifiedV.getValue();
2049     if (isa<UndefValue>(Val)) {
2050       KnownUBInsts.insert(I);
2051       return llvm::None;
2052     }
2053     return Val;
2054   }
2055 };
2056 
2057 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2058   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2059       : AAUndefinedBehaviorImpl(IRP) {}
2060 
2061   /// See AbstractAttribute::trackStatistics()
2062   void trackStatistics() const override {
2063     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2064                "Number of instructions known to have UB");
2065     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2066         KnownUBInsts.size();
2067   }
2068 };
2069 
2070 /// ------------------------ Will-Return Attributes ----------------------------
2071 
// Helper function that checks whether a function has any cycle which we don't
// know to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
2075 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2076   ScalarEvolution *SE =
2077       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2078   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm, to find all the
  // maximal SCCs. To detect if there's a cycle, we only need the maximal ones.
2083   if (!SE || !LI) {
2084     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2085       if (SCCI.hasCycle())
2086         return true;
2087     return false;
2088   }
2089 
2090   // If there's irreducible control, the function may contain non-loop cycles.
2091   if (mayContainIrreducibleControl(F, LI))
2092     return true;
2093 
  // Any loop without a max trip count is considered an unbounded cycle.
2095   for (auto *L : LI->getLoopsInPreorder()) {
2096     if (!SE->getSmallConstantMaxTripCount(L))
2097       return true;
2098   }
2099   return false;
2100 }
2101 
2102 struct AAWillReturnImpl : public AAWillReturn {
2103   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2104 
2105   /// See AbstractAttribute::initialize(...).
2106   void initialize(Attributor &A) override {
2107     AAWillReturn::initialize(A);
2108 
2109     Function *F = getAnchorScope();
2110     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2111       indicatePessimisticFixpoint();
2112   }
2113 
2114   /// See AbstractAttribute::updateImpl(...).
2115   ChangeStatus updateImpl(Attributor &A) override {
2116     auto CheckForWillReturn = [&](Instruction &I) {
2117       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2118       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2119       if (WillReturnAA.isKnownWillReturn())
2120         return true;
2121       if (!WillReturnAA.isAssumedWillReturn())
2122         return false;
2123       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2124       return NoRecurseAA.isAssumedNoRecurse();
2125     };
2126 
2127     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2128       return indicatePessimisticFixpoint();
2129 
2130     return ChangeStatus::UNCHANGED;
2131   }
2132 
2133   /// See AbstractAttribute::getAsStr()
2134   const std::string getAsStr() const override {
2135     return getAssumed() ? "willreturn" : "may-noreturn";
2136   }
2137 };
2138 
2139 struct AAWillReturnFunction final : AAWillReturnImpl {
2140   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2141 
2142   /// See AbstractAttribute::trackStatistics()
2143   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2144 };
2145 
/// WillReturn attribute deduction for a call site.
2147 struct AAWillReturnCallSite final : AAWillReturnImpl {
2148   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2149 
2150   /// See AbstractAttribute::initialize(...).
2151   void initialize(Attributor &A) override {
2152     AAWillReturnImpl::initialize(A);
2153     Function *F = getAssociatedFunction();
2154     if (!F)
2155       indicatePessimisticFixpoint();
2156   }
2157 
2158   /// See AbstractAttribute::updateImpl(...).
2159   ChangeStatus updateImpl(Attributor &A) override {
2160     // TODO: Once we have call site specific value information we can provide
2161     //       call site specific liveness information and then it makes
2162     //       sense to specialize attributes for call sites arguments instead of
2163     //       redirecting requests to the callee argument.
2164     Function *F = getAssociatedFunction();
2165     const IRPosition &FnPos = IRPosition::function(*F);
2166     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2167     return clampStateAndIndicateChange(
2168         getState(),
2169         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2170   }
2171 
2172   /// See AbstractAttribute::trackStatistics()
2173   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2174 };
2175 
/// ------------------- AAReachability Attribute --------------------------
2177 
2178 struct AAReachabilityImpl : AAReachability {
2179   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2180 
2181   const std::string getAsStr() const override {
2182     // TODO: Return the number of reachable queries.
2183     return "reachable";
2184   }
2185 
2186   /// See AbstractAttribute::initialize(...).
2187   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2188 
2189   /// See AbstractAttribute::updateImpl(...).
2190   ChangeStatus updateImpl(Attributor &A) override {
2191     return indicatePessimisticFixpoint();
2192   }
2193 };
2194 
2195 struct AAReachabilityFunction final : public AAReachabilityImpl {
2196   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2197 
2198   /// See AbstractAttribute::trackStatistics()
2199   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2200 };
2201 
2202 /// ------------------------ NoAlias Argument Attribute ------------------------
2203 
2204 struct AANoAliasImpl : AANoAlias {
2205   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {
2206     assert(getAssociatedType()->isPointerTy() &&
2207            "Noalias is a pointer attribute");
2208   }
2209 
2210   const std::string getAsStr() const override {
2211     return getAssumed() ? "noalias" : "may-alias";
2212   }
2213 };
2214 
2215 /// NoAlias attribute for a floating value.
2216 struct AANoAliasFloating final : AANoAliasImpl {
2217   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2218 
2219   /// See AbstractAttribute::initialize(...).
2220   void initialize(Attributor &A) override {
2221     AANoAliasImpl::initialize(A);
2222     Value *Val = &getAssociatedValue();
2223     do {
2224       CastInst *CI = dyn_cast<CastInst>(Val);
2225       if (!CI)
2226         break;
2227       Value *Base = CI->getOperand(0);
2228       if (Base->getNumUses() != 1)
2229         break;
2230       Val = Base;
2231     } while (true);
2232 
2233     if (!Val->getType()->isPointerTy()) {
2234       indicatePessimisticFixpoint();
2235       return;
2236     }
2237 
2238     if (isa<AllocaInst>(Val))
2239       indicateOptimisticFixpoint();
2240     else if (isa<ConstantPointerNull>(Val) &&
2241              !NullPointerIsDefined(getAnchorScope(),
2242                                    Val->getType()->getPointerAddressSpace()))
2243       indicateOptimisticFixpoint();
2244     else if (Val != &getAssociatedValue()) {
2245       const auto &ValNoAliasAA =
2246           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2247       if (ValNoAliasAA.isKnownNoAlias())
2248         indicateOptimisticFixpoint();
2249     }
2250   }
2251 
2252   /// See AbstractAttribute::updateImpl(...).
2253   ChangeStatus updateImpl(Attributor &A) override {
2254     // TODO: Implement this.
2255     return indicatePessimisticFixpoint();
2256   }
2257 
2258   /// See AbstractAttribute::trackStatistics()
2259   void trackStatistics() const override {
2260     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2261   }
2262 };
2263 
2264 /// NoAlias attribute for an argument.
2265 struct AANoAliasArgument final
2266     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2267   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2268   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2269 
2270   /// See AbstractAttribute::initialize(...).
2271   void initialize(Attributor &A) override {
2272     Base::initialize(A);
2273     // See callsite argument attribute and callee argument attribute.
2274     if (hasAttr({Attribute::ByVal}))
2275       indicateOptimisticFixpoint();
2276   }
2277 
2278   /// See AbstractAttribute::update(...).
2279   ChangeStatus updateImpl(Attributor &A) override {
2280     // We have to make sure no-alias on the argument does not break
2281     // synchronization when this is a callback argument, see also [1] below.
2282     // If synchronization cannot be affected, we delegate to the base updateImpl
2283     // function, otherwise we give up for now.
2284 
2285     // If the function is no-sync, no-alias cannot break synchronization.
2286     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2287         *this, IRPosition::function_scope(getIRPosition()));
2288     if (NoSyncAA.isAssumedNoSync())
2289       return Base::updateImpl(A);
2290 
2291     // If the argument is read-only, no-alias cannot break synchronization.
2292     const auto &MemBehaviorAA =
2293         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2294     if (MemBehaviorAA.isAssumedReadOnly())
2295       return Base::updateImpl(A);
2296 
2297     // If the argument is never passed through callbacks, no-alias cannot break
2298     // synchronization.
2299     bool AllCallSitesKnown;
2300     if (A.checkForAllCallSites(
2301             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2302             true, AllCallSitesKnown))
2303       return Base::updateImpl(A);
2304 
2305     // TODO: add no-alias but make sure it doesn't break synchronization by
2306     // introducing fake uses. See:
2307     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2308     //     International Workshop on OpenMP 2018,
2309     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2310 
2311     return indicatePessimisticFixpoint();
2312   }
2313 
2314   /// See AbstractAttribute::trackStatistics()
2315   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2316 };
2317 
2318 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2319   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2320 
2321   /// See AbstractAttribute::initialize(...).
2322   void initialize(Attributor &A) override {
2323     // See callsite argument attribute and callee argument attribute.
2324     const auto &CB = cast<CallBase>(getAnchorValue());
2325     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2326       indicateOptimisticFixpoint();
2327     Value &Val = getAssociatedValue();
2328     if (isa<ConstantPointerNull>(Val) &&
2329         !NullPointerIsDefined(getAnchorScope(),
2330                               Val.getType()->getPointerAddressSpace()))
2331       indicateOptimisticFixpoint();
2332   }
2333 
2334   /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
2336   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2337                             const AAMemoryBehavior &MemBehaviorAA,
2338                             const CallBase &CB, unsigned OtherArgNo) {
2339     // We do not need to worry about aliasing with the underlying IRP.
2340     if (this->getArgNo() == (int)OtherArgNo)
2341       return false;
2342 
2343     // If it is not a pointer or pointer vector we do not alias.
2344     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2345     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2346       return false;
2347 
2348     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2349         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2350         /* TrackDependence */ false);
2351 
2352     // If the argument is readnone, there is no read-write aliasing.
2353     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2354       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2355       return false;
2356     }
2357 
2358     // If the argument is readonly and the underlying value is readonly, there
2359     // is no read-write aliasing.
2360     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2361     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2362       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2363       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2364       return false;
2365     }
2366 
2367     // We have to utilize actual alias analysis queries so we need the object.
2368     if (!AAR)
2369       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2370 
2371     // Try to rule it out at the call site.
2372     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2373     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2374                          "callsite arguments: "
2375                       << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias\n");
2377 
2378     return IsAliasing;
2379   }
2380 
2381   bool
2382   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2383                                          const AAMemoryBehavior &MemBehaviorAA,
2384                                          const AANoAlias &NoAliasAA) {
2385     // We can deduce "noalias" if the following conditions hold.
2386     // (i)   Associated value is assumed to be noalias in the definition.
2387     // (ii)  Associated value is assumed to be no-capture in all the uses
2388     //       possibly executed before this callsite.
2389     // (iii) There is no other pointer argument which could alias with the
2390     //       value.
2391 
2392     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2393     if (!AssociatedValueIsNoAliasAtDef) {
2394       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2395                         << " is not no-alias at the definition\n");
2396       return false;
2397     }
2398 
2399     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2400 
2401     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2402     auto &NoCaptureAA =
2403         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at CFG and check only uses possibly executed before this callsite.
2407     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2408       Instruction *UserI = cast<Instruction>(U.getUser());
2409 
      // If the user is the current instruction and it has a single use.
2411       if ((UserI == getCtxI()) && (UserI->getNumUses() == 1))
2412         return true;
2413 
2414       const Function *ScopeFn = VIRP.getAnchorScope();
2415       if (ScopeFn) {
2416         const auto &ReachabilityAA =
2417             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2418 
2419         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2420           return true;
2421 
2422         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2423           if (CB->isArgOperand(&U)) {
2424 
2425             unsigned ArgNo = CB->getArgOperandNo(&U);
2426 
2427             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2428                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2429 
2430             if (NoCaptureAA.isAssumedNoCapture())
2431               return true;
2432           }
2433         }
2434       }
2435 
      // For cases which can potentially have more users, follow the uses.
2437       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2438           isa<SelectInst>(U)) {
2439         Follow = true;
2440         return true;
2441       }
2442 
2443       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2444       return false;
2445     };
2446 
2447     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2448       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2449         LLVM_DEBUG(
2450             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2451                    << " cannot be noalias as it is potentially captured\n");
2452         return false;
2453       }
2454     }
2455     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2456 
2457     // Check there is no other pointer argument which could alias with the
2458     // value passed at this call site.
2459     // TODO: AbstractCallSite
2460     const auto &CB = cast<CallBase>(getAnchorValue());
2461     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2462          OtherArgNo++)
2463       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2464         return false;
2465 
2466     return true;
2467   }
2468 
2469   /// See AbstractAttribute::updateImpl(...).
2470   ChangeStatus updateImpl(Attributor &A) override {
2471     // If the argument is readnone we are done as there are no accesses via the
2472     // argument.
2473     auto &MemBehaviorAA =
2474         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2475                                      /* TrackDependence */ false);
2476     if (MemBehaviorAA.isAssumedReadNone()) {
2477       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2478       return ChangeStatus::UNCHANGED;
2479     }
2480 
2481     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2482     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2483                                                   /* TrackDependence */ false);
2484 
2485     AAResults *AAR = nullptr;
2486     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2487                                                NoAliasAA)) {
2488       LLVM_DEBUG(
2489           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2490       return ChangeStatus::UNCHANGED;
2491     }
2492 
2493     return indicatePessimisticFixpoint();
2494   }
2495 
2496   /// See AbstractAttribute::trackStatistics()
2497   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2498 };
2499 
2500 /// NoAlias attribute for function return value.
2501 struct AANoAliasReturned final : AANoAliasImpl {
2502   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2503 
2504   /// See AbstractAttribute::updateImpl(...).
2505   virtual ChangeStatus updateImpl(Attributor &A) override {
2506 
2507     auto CheckReturnValue = [&](Value &RV) -> bool {
2508       if (Constant *C = dyn_cast<Constant>(&RV))
2509         if (C->isNullValue() || isa<UndefValue>(C))
2510           return true;
2511 
      // For now, we can only deduce noalias for returned call site values.
      // FIXME: add more support.
2514       if (!isa<CallBase>(&RV))
2515         return false;
2516 
2517       const IRPosition &RVPos = IRPosition::value(RV);
2518       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2519       if (!NoAliasAA.isAssumedNoAlias())
2520         return false;
2521 
2522       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2523       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2524     };
2525 
2526     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2527       return indicatePessimisticFixpoint();
2528 
2529     return ChangeStatus::UNCHANGED;
2530   }
2531 
2532   /// See AbstractAttribute::trackStatistics()
2533   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2534 };
2535 
2536 /// NoAlias attribute deduction for a call site return value.
2537 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2538   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2539 
2540   /// See AbstractAttribute::initialize(...).
2541   void initialize(Attributor &A) override {
2542     AANoAliasImpl::initialize(A);
2543     Function *F = getAssociatedFunction();
2544     if (!F)
2545       indicatePessimisticFixpoint();
2546   }
2547 
2548   /// See AbstractAttribute::updateImpl(...).
2549   ChangeStatus updateImpl(Attributor &A) override {
2550     // TODO: Once we have call site specific value information we can provide
2551     //       call site specific liveness information and then it makes
2552     //       sense to specialize attributes for call sites arguments instead of
2553     //       redirecting requests to the callee argument.
2554     Function *F = getAssociatedFunction();
2555     const IRPosition &FnPos = IRPosition::returned(*F);
2556     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2557     return clampStateAndIndicateChange(
2558         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2559   }
2560 
2561   /// See AbstractAttribute::trackStatistics()
2562   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2563 };
2564 
/// ------------------- AAIsDead Function Attribute -----------------------
2566 
2567 struct AAIsDeadValueImpl : public AAIsDead {
2568   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2569 
2570   /// See AAIsDead::isAssumedDead().
2571   bool isAssumedDead() const override { return getAssumed(); }
2572 
2573   /// See AAIsDead::isKnownDead().
2574   bool isKnownDead() const override { return getKnown(); }
2575 
2576   /// See AAIsDead::isAssumedDead(BasicBlock *).
2577   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2578 
2579   /// See AAIsDead::isKnownDead(BasicBlock *).
2580   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2581 
2582   /// See AAIsDead::isAssumedDead(Instruction *I).
2583   bool isAssumedDead(const Instruction *I) const override {
2584     return I == getCtxI() && isAssumedDead();
2585   }
2586 
2587   /// See AAIsDead::isKnownDead(Instruction *I).
2588   bool isKnownDead(const Instruction *I) const override {
2589     return isAssumedDead(I) && getKnown();
2590   }
2591 
2592   /// See AbstractAttribute::getAsStr().
2593   const std::string getAsStr() const override {
2594     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2595   }
2596 
2597   /// Check if all uses are assumed dead.
2598   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2599     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2600     // Explicitly set the dependence class to required because we want a long
2601     // chain of N dependent instructions to be considered live as soon as one is
2602     // without going through N update cycles. This is not required for
2603     // correctness.
2604     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2605   }
2606 
2607   /// Determine if \p I is assumed to be side-effect free.
2608   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2609     if (!I || wouldInstructionBeTriviallyDead(I))
2610       return true;
2611 
2612     auto *CB = dyn_cast<CallBase>(I);
2613     if (!CB || isa<IntrinsicInst>(CB))
2614       return false;
2615 
2616     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2617     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2618     if (!NoUnwindAA.isAssumedNoUnwind())
2619       return false;
2620 
2621     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2622     if (!MemBehaviorAA.isAssumedReadOnly())
2623       return false;
2624 
2625     return true;
2626   }
2627 };
2628 
2629 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2630   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2631 
2632   /// See AbstractAttribute::initialize(...).
2633   void initialize(Attributor &A) override {
2634     if (isa<UndefValue>(getAssociatedValue())) {
2635       indicatePessimisticFixpoint();
2636       return;
2637     }
2638 
2639     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2640     if (!isAssumedSideEffectFree(A, I))
2641       indicatePessimisticFixpoint();
2642   }
2643 
2644   /// See AbstractAttribute::updateImpl(...).
2645   ChangeStatus updateImpl(Attributor &A) override {
2646     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2647     if (!isAssumedSideEffectFree(A, I))
2648       return indicatePessimisticFixpoint();
2649 
2650     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2651       return indicatePessimisticFixpoint();
2652     return ChangeStatus::UNCHANGED;
2653   }
2654 
2655   /// See AbstractAttribute::manifest(...).
2656   ChangeStatus manifest(Attributor &A) override {
2657     Value &V = getAssociatedValue();
2658     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check if
      // isAssumedSideEffectFree returns true here again because it might not
      // hold anymore: it could be that only the users are dead while the
      // instruction (= call) is still needed.
2663       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2664         A.deleteAfterManifest(*I);
2665         return ChangeStatus::CHANGED;
2666       }
2667     }
2668     if (V.use_empty())
2669       return ChangeStatus::UNCHANGED;
2670 
2671     bool UsedAssumedInformation = false;
2672     Optional<Constant *> C =
2673         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2674     if (C.hasValue() && C.getValue())
2675       return ChangeStatus::UNCHANGED;
2676 
2677     // Replace the value with undef as it is dead but keep droppable uses around
2678     // as they provide information we don't want to give up on just yet.
2679     UndefValue &UV = *UndefValue::get(V.getType());
2680     bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2682     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2683   }
2684 
2685   /// See AbstractAttribute::trackStatistics()
2686   void trackStatistics() const override {
2687     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2688   }
2689 };
2690 
2691 struct AAIsDeadArgument : public AAIsDeadFloating {
2692   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2693 
2694   /// See AbstractAttribute::initialize(...).
2695   void initialize(Attributor &A) override {
2696     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2697       indicatePessimisticFixpoint();
2698   }
2699 
2700   /// See AbstractAttribute::manifest(...).
2701   ChangeStatus manifest(Attributor &A) override {
2702     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2703     Argument &Arg = *getAssociatedArgument();
2704     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2705       if (A.registerFunctionSignatureRewrite(
2706               Arg, /* ReplacementTypes */ {},
2707               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2708               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2709         Arg.dropDroppableUses();
2710         return ChangeStatus::CHANGED;
2711       }
2712     return Changed;
2713   }
2714 
2715   /// See AbstractAttribute::trackStatistics()
2716   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2717 };
2718 
2719 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2720   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2721 
2722   /// See AbstractAttribute::initialize(...).
2723   void initialize(Attributor &A) override {
2724     if (isa<UndefValue>(getAssociatedValue()))
2725       indicatePessimisticFixpoint();
2726   }
2727 
2728   /// See AbstractAttribute::updateImpl(...).
2729   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information. Then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
2734     Argument *Arg = getAssociatedArgument();
2735     if (!Arg)
2736       return indicatePessimisticFixpoint();
2737     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2738     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2739     return clampStateAndIndicateChange(
2740         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2741   }
2742 
2743   /// See AbstractAttribute::manifest(...).
2744   ChangeStatus manifest(Attributor &A) override {
2745     CallBase &CB = cast<CallBase>(getAnchorValue());
2746     Use &U = CB.getArgOperandUse(getArgNo());
2747     assert(!isa<UndefValue>(U.get()) &&
2748            "Expected undef values to be filtered out!");
2749     UndefValue &UV = *UndefValue::get(U->getType());
2750     if (A.changeUseAfterManifest(U, UV))
2751       return ChangeStatus::CHANGED;
2752     return ChangeStatus::UNCHANGED;
2753   }
2754 
2755   /// See AbstractAttribute::trackStatistics()
2756   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2757 };
2758 
2759 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2760   AAIsDeadCallSiteReturned(const IRPosition &IRP)
2761       : AAIsDeadFloating(IRP), IsAssumedSideEffectFree(true) {}
2762 
2763   /// See AAIsDead::isAssumedDead().
2764   bool isAssumedDead() const override {
2765     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2766   }
2767 
2768   /// See AbstractAttribute::initialize(...).
2769   void initialize(Attributor &A) override {
2770     if (isa<UndefValue>(getAssociatedValue())) {
2771       indicatePessimisticFixpoint();
2772       return;
2773     }
2774 
2775     // We track this separately as a secondary state.
2776     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2777   }
2778 
2779   /// See AbstractAttribute::updateImpl(...).
2780   ChangeStatus updateImpl(Attributor &A) override {
2781     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2782     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2783       IsAssumedSideEffectFree = false;
2784       Changed = ChangeStatus::CHANGED;
2785     }
2786 
2787     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2788       return indicatePessimisticFixpoint();
2789     return Changed;
2790   }
2791 
2792   /// See AbstractAttribute::trackStatistics()
2793   void trackStatistics() const override {
2794     if (IsAssumedSideEffectFree)
2795       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2796     else
2797       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2798   }
2799 
2800   /// See AbstractAttribute::getAsStr().
2801   const std::string getAsStr() const override {
2802     return isAssumedDead()
2803                ? "assumed-dead"
2804                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2805   }
2806 
2807 private:
2808   bool IsAssumedSideEffectFree;
2809 };
2810 
2811 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2812   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2813 
2814   /// See AbstractAttribute::updateImpl(...).
2815   ChangeStatus updateImpl(Attributor &A) override {
2816 
    // Check all return instructions; the predicate is a no-op, but the check
    // registers dependences on the liveness of the returns so this attribute
    // is updated when their liveness changes.
    A.checkForAllInstructions([](Instruction &) { return true; }, *this,
                              {Instruction::Ret});
2819 
2820     auto PredForCallSite = [&](AbstractCallSite ACS) {
2821       if (ACS.isCallbackCall() || !ACS.getInstruction())
2822         return false;
2823       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2824     };
2825 
2826     bool AllCallSitesKnown;
2827     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2828                                 AllCallSitesKnown))
2829       return indicatePessimisticFixpoint();
2830 
2831     return ChangeStatus::UNCHANGED;
2832   }
2833 
2834   /// See AbstractAttribute::manifest(...).
2835   ChangeStatus manifest(Attributor &A) override {
2836     // TODO: Rewrite the signature to return void?
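    // For now, rewrite every non-undef returned value to undef, e.g.
    // (illustrative IR): `ret i32 %v` becomes `ret i32 undef` once no call
    // site is assumed to use the result.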
2837     bool AnyChange = false;
2838     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2839     auto RetInstPred = [&](Instruction &I) {
2840       ReturnInst &RI = cast<ReturnInst>(I);
2841       if (!isa<UndefValue>(RI.getReturnValue()))
2842         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2843       return true;
2844     };
2845     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2846     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2847   }
2848 
2849   /// See AbstractAttribute::trackStatistics()
2850   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2851 };
2852 
2853 struct AAIsDeadFunction : public AAIsDead {
2854   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2855 
2856   /// See AbstractAttribute::initialize(...).
2857   void initialize(Attributor &A) override {
2858     const Function *F = getAnchorScope();
2859     if (F && !F->isDeclaration()) {
2860       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2861       assumeLive(A, F->getEntryBlock());
2862     }
2863   }
2864 
2865   /// See AbstractAttribute::getAsStr().
2866   const std::string getAsStr() const override {
2867     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2868            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2869            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2870            std::to_string(KnownDeadEnds.size()) + "]";
2871   }
2872 
2873   /// See AbstractAttribute::manifest(...).
2874   ChangeStatus manifest(Attributor &A) override {
2875     assert(getState().isValidState() &&
2876            "Attempted to manifest an invalid state!");
2877 
2878     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2879     Function &F = *getAnchorScope();
2880 
2881     if (AssumedLiveBlocks.empty()) {
2882       A.deleteAfterManifest(F);
2883       return ChangeStatus::CHANGED;
2884     }
2885 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
2889     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
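    // Illustrative example (hypothetical IR): if `call void @never_returns()`
    // is a known dead end, everything following it in the block is dead; the
    // instruction right after the call is replaced by `unreachable` below.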
2890 
2891     KnownDeadEnds.set_union(ToBeExploredFrom);
2892     for (const Instruction *DeadEndI : KnownDeadEnds) {
2893       auto *CB = dyn_cast<CallBase>(DeadEndI);
2894       if (!CB)
2895         continue;
2896       const auto &NoReturnAA =
2897           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2898       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2899       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2900         continue;
2901 
2902       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2903         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2904       else
2905         A.changeToUnreachableAfterManifest(
2906             const_cast<Instruction *>(DeadEndI->getNextNode()));
2907       HasChanged = ChangeStatus::CHANGED;
2908     }
2909 
2910     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2911     for (BasicBlock &BB : F)
2912       if (!AssumedLiveBlocks.count(&BB)) {
2913         A.deleteAfterManifest(BB);
2914         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2915       }
2916 
2917     return HasChanged;
2918   }
2919 
2920   /// See AbstractAttribute::updateImpl(...).
2921   ChangeStatus updateImpl(Attributor &A) override;
2922 
2923   /// See AbstractAttribute::trackStatistics()
2924   void trackStatistics() const override {}
2925 
  /// See AAIsDead::isAssumedDead().
2927   bool isAssumedDead() const override { return false; }
2928 
2929   /// See AAIsDead::isKnownDead().
2930   bool isKnownDead() const override { return false; }
2931 
2932   /// See AAIsDead::isAssumedDead(BasicBlock *).
2933   bool isAssumedDead(const BasicBlock *BB) const override {
2934     assert(BB->getParent() == getAnchorScope() &&
2935            "BB must be in the same anchor scope function.");
2936 
2937     if (!getAssumed())
2938       return false;
2939     return !AssumedLiveBlocks.count(BB);
2940   }
2941 
2942   /// See AAIsDead::isKnownDead(BasicBlock *).
2943   bool isKnownDead(const BasicBlock *BB) const override {
2944     return getKnown() && isAssumedDead(BB);
2945   }
2946 
  /// See AAIsDead::isAssumedDead(Instruction *I).
2948   bool isAssumedDead(const Instruction *I) const override {
2949     assert(I->getParent()->getParent() == getAnchorScope() &&
2950            "Instruction must be in the same anchor scope function.");
2951 
2952     if (!getAssumed())
2953       return false;
2954 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
2957     if (!AssumedLiveBlocks.count(I->getParent()))
2958       return true;
2959 
2960     // If it is not after a liveness barrier it is live.
2961     const Instruction *PrevI = I->getPrevNode();
2962     while (PrevI) {
2963       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2964         return true;
2965       PrevI = PrevI->getPrevNode();
2966     }
2967     return false;
2968   }
2969 
2970   /// See AAIsDead::isKnownDead(Instruction *I).
2971   bool isKnownDead(const Instruction *I) const override {
2972     return getKnown() && isAssumedDead(I);
2973   }
2974 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
2977   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2978     if (!AssumedLiveBlocks.insert(&BB).second)
2979       return false;
2980 
2981     // We assume that all of BB is (probably) live now and if there are calls to
2982     // internal functions we will assume that those are now live as well. This
2983     // is a performance optimization for blocks with calls to a lot of internal
2984     // functions. It can however cause dead functions to be treated as live.
2985     for (const Instruction &I : BB)
2986       if (const auto *CB = dyn_cast<CallBase>(&I))
2987         if (const Function *F = CB->getCalledFunction())
2988           if (F->hasLocalLinkage())
2989             A.markLiveInternalFunction(*F);
2990     return true;
2991   }
2992 
  /// Collection of instructions that need to be explored again because we
  /// relied on assumed information, e.g., that they do not transfer control
  /// to (one of their) successors.
2995   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2996 
2997   /// Collection of instructions that are known to not transfer control.
2998   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2999 
3000   /// Collection of all assumed live BasicBlocks.
3001   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3002 };
3003 
3004 static bool
3005 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3006                         AbstractAttribute &AA,
3007                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3008   const IRPosition &IPos = IRPosition::callsite_function(CB);
3009 
3010   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3011   if (NoReturnAA.isAssumedNoReturn())
3012     return !NoReturnAA.isKnownNoReturn();
3013   if (CB.isTerminator())
3014     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3015   else
3016     AliveSuccessors.push_back(CB.getNextNode());
3017   return false;
3018 }
3019 
3020 static bool
3021 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3022                         AbstractAttribute &AA,
3023                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3024   bool UsedAssumedInformation =
3025       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3026 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3030   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3031     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3032   } else {
3033     const IRPosition &IPos = IRPosition::callsite_function(II);
3034     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3035     if (AANoUnw.isAssumedNoUnwind()) {
3036       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3037     } else {
3038       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3039     }
3040   }
3041   return UsedAssumedInformation;
3042 }
3043 
3044 static bool
3045 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3046                         AbstractAttribute &AA,
3047                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3048   bool UsedAssumedInformation = false;
3049   if (BI.getNumSuccessors() == 1) {
3050     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3051   } else {
3052     Optional<ConstantInt *> CI = getAssumedConstantInt(
3053         A, *BI.getCondition(), AA, UsedAssumedInformation);
3054     if (!CI.hasValue()) {
3055       // No value yet, assume both edges are dead.
3056     } else if (CI.getValue()) {
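      // A true (1) condition keeps successor 0, a false (0) condition keeps
      // successor 1; e.g. (illustrative IR), for
      // `br i1 true, label %T, label %F` only %T stays alive.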
3057       const BasicBlock *SuccBB =
3058           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3059       AliveSuccessors.push_back(&SuccBB->front());
3060     } else {
3061       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3062       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3063       UsedAssumedInformation = false;
3064     }
3065   }
3066   return UsedAssumedInformation;
3067 }
3068 
3069 static bool
3070 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3071                         AbstractAttribute &AA,
3072                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3073   bool UsedAssumedInformation = false;
3074   Optional<ConstantInt *> CI =
3075       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3076   if (!CI.hasValue()) {
3077     // No value yet, assume all edges are dead.
3078   } else if (CI.getValue()) {
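    // A known condition selects a single case, e.g. (illustrative IR): for
    // `switch i32 2, label %default [ i32 2, label %two ]` only %two stays
    // alive; the default destination is taken only if no case matches.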
3079     for (auto &CaseIt : SI.cases()) {
3080       if (CaseIt.getCaseValue() == CI.getValue()) {
3081         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3082         return UsedAssumedInformation;
3083       }
3084     }
3085     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3086     return UsedAssumedInformation;
3087   } else {
3088     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3089       AliveSuccessors.push_back(&SuccBB->front());
3090   }
3091   return UsedAssumedInformation;
3092 }
3093 
3094 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3095   ChangeStatus Change = ChangeStatus::UNCHANGED;
3096 
3097   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3098                     << getAnchorScope()->size() << "] BBs and "
3099                     << ToBeExploredFrom.size() << " exploration points and "
3100                     << KnownDeadEnds.size() << " known dead ends\n");
3101 
3102   // Copy and clear the list of instructions we need to explore from. It is
3103   // refilled with instructions the next update has to look at.
3104   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3105                                                ToBeExploredFrom.end());
3106   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3107 
3108   SmallVector<const Instruction *, 8> AliveSuccessors;
3109   while (!Worklist.empty()) {
3110     const Instruction *I = Worklist.pop_back_val();
3111     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3112 
3113     AliveSuccessors.clear();
3114 
3115     bool UsedAssumedInformation = false;
3116     switch (I->getOpcode()) {
3117     // TODO: look for (assumed) UB to backwards propagate "deadness".
3118     default:
3119       if (I->isTerminator()) {
3120         for (const BasicBlock *SuccBB : successors(I->getParent()))
3121           AliveSuccessors.push_back(&SuccBB->front());
3122       } else {
3123         AliveSuccessors.push_back(I->getNextNode());
3124       }
3125       break;
3126     case Instruction::Call:
3127       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3128                                                        *this, AliveSuccessors);
3129       break;
3130     case Instruction::Invoke:
3131       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3132                                                        *this, AliveSuccessors);
3133       break;
3134     case Instruction::Br:
3135       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3136                                                        *this, AliveSuccessors);
3137       break;
3138     case Instruction::Switch:
3139       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3140                                                        *this, AliveSuccessors);
3141       break;
3142     }
3143 
3144     if (UsedAssumedInformation) {
3145       NewToBeExploredFrom.insert(I);
3146     } else {
3147       Change = ChangeStatus::CHANGED;
3148       if (AliveSuccessors.empty() ||
3149           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3150         KnownDeadEnds.insert(I);
3151     }
3152 
3153     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3154                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3155                       << UsedAssumedInformation << "\n");
3156 
3157     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3158       if (!I->isTerminator()) {
3159         assert(AliveSuccessors.size() == 1 &&
3160                "Non-terminator expected to have a single successor!");
3161         Worklist.push_back(AliveSuccessor);
3162       } else {
3163         if (assumeLive(A, *AliveSuccessor->getParent()))
3164           Worklist.push_back(AliveSuccessor);
3165       }
3166     }
3167   }
3168 
3169   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3170 
3171   // If we know everything is live there is no need to query for liveness.
3172   // Instead, indicating a pessimistic fixpoint will cause the state to be
3173   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration,
  // (2) not have discovered any non-trivial dead end, and (3) not have ruled
  // unreachable code dead.
3177   if (ToBeExploredFrom.empty() &&
3178       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3179       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3180         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3181       }))
3182     return indicatePessimisticFixpoint();
3183   return Change;
3184 }
3185 
/// Liveness information for a call site.
3187 struct AAIsDeadCallSite final : AAIsDeadFunction {
3188   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3189 
3190   /// See AbstractAttribute::initialize(...).
3191   void initialize(Attributor &A) override {
3192     // TODO: Once we have call site specific value information we can provide
3193     //       call site specific liveness information and then it makes
3194     //       sense to specialize attributes for call sites instead of
3195     //       redirecting requests to the callee.
3196     llvm_unreachable("Abstract attributes for liveness are not "
3197                      "supported for call sites yet!");
3198   }
3199 
3200   /// See AbstractAttribute::updateImpl(...).
3201   ChangeStatus updateImpl(Attributor &A) override {
3202     return indicatePessimisticFixpoint();
3203   }
3204 
3205   /// See AbstractAttribute::trackStatistics()
3206   void trackStatistics() const override {}
3207 };
3208 
3209 /// -------------------- Dereferenceable Argument Attribute --------------------
3210 
3211 template <>
3212 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3213                                                      const DerefState &R) {
3214   ChangeStatus CS0 =
3215       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3216   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3217   return CS0 | CS1;
3218 }
3219 
3220 struct AADereferenceableImpl : AADereferenceable {
3221   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3222   using StateType = DerefState;
3223 
3224   void initialize(Attributor &A) override {
3225     SmallVector<Attribute, 4> Attrs;
3226     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3227              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3228     for (const Attribute &Attr : Attrs)
3229       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3230 
3231     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3232                                        /* TrackDependence */ false);
3233 
3234     const IRPosition &IRP = this->getIRPosition();
3235     bool IsFnInterface = IRP.isFnInterfaceKind();
3236     Function *FnScope = IRP.getAnchorScope();
3237     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope)))
3238       indicatePessimisticFixpoint();
3239   }
3240 
3241   /// See AbstractAttribute::getState()
3242   /// {
3243   StateType &getState() override { return *this; }
3244   const StateType &getState() const override { return *this; }
3245   /// }
3246 
3247   /// Helper function for collecting accessed bytes in must-be-executed-context
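  /// E.g. (illustrative), for `store i32 0, i32* %p` where %p is the
  /// associated pointer plus a constant offset of 8, the byte range [8, 12)
  /// is recorded as accessed.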
3248   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3249                               DerefState &State) {
3250     const Value *UseV = U->get();
3251     if (!UseV->getType()->isPointerTy())
3252       return;
3253 
3254     Type *PtrTy = UseV->getType();
3255     const DataLayout &DL = A.getDataLayout();
3256     int64_t Offset;
3257     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3258             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3259       if (Base == &getAssociatedValue() &&
3260           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3261         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3262         State.addAccessedBytes(Offset, Size);
3263       }
3264     }
3266   }
3267 
3268   /// See AAFromMustBeExecutedContext
3269   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3270                  AADereferenceable::StateType &State) {
3271     bool IsNonNull = false;
3272     bool TrackUse = false;
3273     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3274         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3275 
3276     addAccessedBytesForUse(A, U, I, State);
3277     State.takeKnownDerefBytesMaximum(DerefBytes);
3278     return TrackUse;
3279   }
3280 
3281   /// See AbstractAttribute::manifest(...).
3282   ChangeStatus manifest(Attributor &A) override {
3283     ChangeStatus Change = AADereferenceable::manifest(A);
3284     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3285       removeAttrs({Attribute::DereferenceableOrNull});
3286       return ChangeStatus::CHANGED;
3287     }
3288     return Change;
3289   }
3290 
3291   void getDeducedAttributes(LLVMContext &Ctx,
3292                             SmallVectorImpl<Attribute> &Attrs) const override {
3293     // TODO: Add *_globally support
3294     if (isAssumedNonNull())
3295       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3296           Ctx, getAssumedDereferenceableBytes()));
3297     else
3298       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3299           Ctx, getAssumedDereferenceableBytes()));
3300   }
3301 
3302   /// See AbstractAttribute::getAsStr().
3303   const std::string getAsStr() const override {
3304     if (!getAssumedDereferenceableBytes())
3305       return "unknown-dereferenceable";
3306     return std::string("dereferenceable") +
3307            (isAssumedNonNull() ? "" : "_or_null") +
3308            (isAssumedGlobal() ? "_globally" : "") + "<" +
3309            std::to_string(getKnownDereferenceableBytes()) + "-" +
3310            std::to_string(getAssumedDereferenceableBytes()) + ">";
3311   }
3312 };
3313 
3314 /// Dereferenceable attribute for a floating value.
3315 struct AADereferenceableFloating
3316     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3317   using Base =
3318       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3319   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3320 
3321   /// See AbstractAttribute::updateImpl(...).
3322   ChangeStatus updateImpl(Attributor &A) override {
3323     ChangeStatus Change = Base::updateImpl(A);
3324 
3325     const DataLayout &DL = A.getDataLayout();
3326 
3327     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3328                             bool Stripped) -> bool {
3329       unsigned IdxWidth =
3330           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3331       APInt Offset(IdxWidth, 0);
3332       const Value *Base =
3333           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3334 
3335       const auto &AA =
3336           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3337       int64_t DerefBytes = 0;
3338       if (!Stripped && this == &AA) {
3339         // Use IR information if we did not strip anything.
3340         // TODO: track globally.
3341         bool CanBeNull;
3342         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3343         T.GlobalState.indicatePessimisticFixpoint();
3344       } else {
3345         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3346         DerefBytes = DS.DerefBytesState.getAssumed();
3347         T.GlobalState &= DS.GlobalState;
3348       }
3349 
3350       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3351 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first need code to deal with loops and with overflows of
      // the dereferenceable bytes.
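      // E.g. (illustrative): with `dereferenceable(32)` known for %base,
      // `gep inbounds i8, i8* %base, i64 16` yields at most 32 - 16 = 16
      // assumed dereferenceable bytes for the GEP result.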
3355       int64_t OffsetSExt = Offset.getSExtValue();
3356       if (OffsetSExt < 0)
3357         OffsetSExt = 0;
3358 
3359       T.takeAssumedDerefBytesMinimum(
3360           std::max(int64_t(0), DerefBytes - OffsetSExt));
3361 
3362       if (this == &AA) {
3363         if (!Stripped) {
3364           // If nothing was stripped IR information is all we got.
3365           T.takeKnownDerefBytesMaximum(
3366               std::max(int64_t(0), DerefBytes - OffsetSExt));
3367           T.indicatePessimisticFixpoint();
3368         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we would decrease the
          // dereferenceable bytes in a circular loop, which would drive them
          // down to the known value in a very slow way that we can
          // accelerate by fixing the state now.
3374           T.indicatePessimisticFixpoint();
3375         }
3376       }
3377 
3378       return T.isValidState();
3379     };
3380 
3381     DerefState T;
3382     if (!genericValueTraversal<AADereferenceable, DerefState>(
3383             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3384       return indicatePessimisticFixpoint();
3385 
3386     return Change | clampStateAndIndicateChange(getState(), T);
3387   }
3388 
3389   /// See AbstractAttribute::trackStatistics()
3390   void trackStatistics() const override {
3391     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3392   }
3393 };
3394 
3395 /// Dereferenceable attribute for a return value.
3396 struct AADereferenceableReturned final
3397     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3398   AADereferenceableReturned(const IRPosition &IRP)
3399       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3400             IRP) {}
3401 
3402   /// See AbstractAttribute::trackStatistics()
3403   void trackStatistics() const override {
3404     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3405   }
3406 };
3407 
3408 /// Dereferenceable attribute for an argument
3409 struct AADereferenceableArgument final
3410     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3411           AADereferenceable, AADereferenceableImpl> {
3412   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3413       AADereferenceable, AADereferenceableImpl>;
3414   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3415 
3416   /// See AbstractAttribute::trackStatistics()
3417   void trackStatistics() const override {
3418     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3419   }
3420 };
3421 
3422 /// Dereferenceable attribute for a call site argument.
3423 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3424   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3425       : AADereferenceableFloating(IRP) {}
3426 
3427   /// See AbstractAttribute::trackStatistics()
3428   void trackStatistics() const override {
3429     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3430   }
3431 };
3432 
3433 /// Dereferenceable attribute deduction for a call site return value.
3434 struct AADereferenceableCallSiteReturned final
3435     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3436           AADereferenceable, AADereferenceableImpl> {
3437   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3438       AADereferenceable, AADereferenceableImpl>;
3439   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3440 
3441   /// See AbstractAttribute::trackStatistics()
3442   void trackStatistics() const override {
3443     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3444   }
3445 };
3446 
3447 // ------------------------ Align Argument Attribute ------------------------
3448 
3449 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3450 /// the element type to be aligned.
3451 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3452                                                    const DataLayout &DL) {
3453   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3454   Type *ElementTy = Ptr->getType()->getPointerElementType();
3455   if (ElementTy->isSized())
3456     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3457   return KnownAlignment;
3458 }
3459 
3460 static unsigned getKnownAlignForUse(Attributor &A,
3461                                     AbstractAttribute &QueryingAA,
3462                                     Value &AssociatedValue, const Use *U,
3463                                     const Instruction *I, bool &TrackUse) {
3464   // We need to follow common pointer manipulation uses to the accesses they
3465   // feed into.
3466   if (isa<CastInst>(I)) {
3467     // Follow all but ptr2int casts.
3468     TrackUse = !isa<PtrToIntInst>(I);
3469     return 0;
3470   }
3471   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3472     if (GEP->hasAllConstantIndices()) {
3473       TrackUse = true;
3474       return 0;
3475     }
3476   }
3477 
3478   MaybeAlign MA;
3479   if (const auto *CB = dyn_cast<CallBase>(I)) {
3480     if (CB->isBundleOperand(U) || CB->isCallee(U))
3481       return 0;
3482 
3483     unsigned ArgNo = CB->getArgOperandNo(U);
3484     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3485     // As long as we only use known information there is no need to track
3486     // dependences here.
3487     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3488                                         /* TrackDependence */ false);
3489     MA = MaybeAlign(AlignAA.getKnownAlign());
3490   }
3491 
3492   const DataLayout &DL = A.getDataLayout();
3493   const Value *UseV = U->get();
3494   if (auto *SI = dyn_cast<StoreInst>(I)) {
3495     if (SI->getPointerOperand() == UseV) {
3496       if (unsigned SIAlign = SI->getAlignment())
3497         MA = MaybeAlign(SIAlign);
3498       else
3499         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3500     }
3501   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3502     if (LI->getPointerOperand() == UseV) {
3503       if (unsigned LIAlign = LI->getAlignment())
3504         MA = MaybeAlign(LIAlign);
3505       else
3506         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3507     }
3508   }
3509 
3510   if (!MA.hasValue() || MA <= 1)
3511     return 0;
3512 
3513   unsigned Alignment = MA->value();
3514   int64_t Offset;
3515 
3516   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3517     if (Base == &AssociatedValue) {
3518       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3519       // So we can say that the maximum power of two which is a divisor of
3520       // gcd(Offset, Alignment) is an alignment.
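      // Worked example (hypothetical numbers): Alignment = 8 and Offset = 20
      // give gcd(20, 8) = 4, so this use only proves an alignment of 4 for
      // the base pointer.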
3521 
3522       uint32_t gcd =
3523           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3524       Alignment = llvm::PowerOf2Floor(gcd);
3525     }
3526   }
3527 
3528   return Alignment;
3529 }
3530 
3531 struct AAAlignImpl : AAAlign {
3532   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3533 
3534   /// See AbstractAttribute::initialize(...).
3535   void initialize(Attributor &A) override {
3536     SmallVector<Attribute, 4> Attrs;
3537     getAttrs({Attribute::Alignment}, Attrs);
3538     for (const Attribute &Attr : Attrs)
3539       takeKnownMaximum(Attr.getValueAsInt());
3540 
3541     if (getIRPosition().isFnInterfaceKind() &&
3542         (!getAnchorScope() ||
3543          !A.isFunctionIPOAmendable(*getAssociatedFunction())))
3544       indicatePessimisticFixpoint();
3545   }
3546 
3547   /// See AbstractAttribute::manifest(...).
3548   ChangeStatus manifest(Attributor &A) override {
3549     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3550 
3551     // Check for users that allow alignment annotations.
3552     Value &AssociatedValue = getAssociatedValue();
3553     for (const Use &U : AssociatedValue.uses()) {
3554       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3555         if (SI->getPointerOperand() == &AssociatedValue)
3556           if (SI->getAlignment() < getAssumedAlign()) {
3557             STATS_DECLTRACK(AAAlign, Store,
3558                             "Number of times alignment added to a store");
3559             SI->setAlignment(Align(getAssumedAlign()));
3560             LoadStoreChanged = ChangeStatus::CHANGED;
3561           }
3562       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3563         if (LI->getPointerOperand() == &AssociatedValue)
3564           if (LI->getAlignment() < getAssumedAlign()) {
3565             LI->setAlignment(Align(getAssumedAlign()));
3566             STATS_DECLTRACK(AAAlign, Load,
3567                             "Number of times alignment added to a load");
3568             LoadStoreChanged = ChangeStatus::CHANGED;
3569           }
3570       }
3571     }
3572 
3573     ChangeStatus Changed = AAAlign::manifest(A);
3574 
3575     MaybeAlign InheritAlign =
3576         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3577     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3578       return LoadStoreChanged;
3579     return Changed | LoadStoreChanged;
3580   }
3581 
  // TODO: Provide a helper to determine the implied ABI alignment and check it
  //       in the existing manifest method, and in a new one for AAAlignImpl,
  //       to avoid making the alignment explicit if it did not improve.
3585 
3586   /// See AbstractAttribute::getDeducedAttributes
3587   virtual void
3588   getDeducedAttributes(LLVMContext &Ctx,
3589                        SmallVectorImpl<Attribute> &Attrs) const override {
3590     if (getAssumedAlign() > 1)
3591       Attrs.emplace_back(
3592           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3593   }
3594   /// See AAFromMustBeExecutedContext
3595   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3596                  AAAlign::StateType &State) {
3597     bool TrackUse = false;
3598 
3599     unsigned int KnownAlign =
3600         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3601     State.takeKnownMaximum(KnownAlign);
3602 
3603     return TrackUse;
3604   }
3605 
3606   /// See AbstractAttribute::getAsStr().
3607   const std::string getAsStr() const override {
3608     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3609                                 "-" + std::to_string(getAssumedAlign()) + ">")
3610                              : "unknown-align";
3611   }
3612 };
3613 
3614 /// Align attribute for a floating value.
3615 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3616   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3617   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3618 
3619   /// See AbstractAttribute::updateImpl(...).
3620   ChangeStatus updateImpl(Attributor &A) override {
3621     Base::updateImpl(A);
3622 
3623     const DataLayout &DL = A.getDataLayout();
3624 
3625     auto VisitValueCB = [&](Value &V, const Instruction *,
3626                             AAAlign::StateType &T, bool Stripped) -> bool {
3627       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3628       if (!Stripped && this == &AA) {
3629         // Use only IR information if we did not strip anything.
3630         const MaybeAlign PA = V.getPointerAlignment(DL);
3631         T.takeKnownMaximum(PA ? PA->value() : 0);
3632         T.indicatePessimisticFixpoint();
3633       } else {
3634         // Use abstract attribute information.
3635         const AAAlign::StateType &DS =
3636             static_cast<const AAAlign::StateType &>(AA.getState());
3637         T ^= DS;
3638       }
3639       return T.isValidState();
3640     };
3641 
3642     StateType T;
3643     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3644                                                    VisitValueCB, getCtxI()))
3645       return indicatePessimisticFixpoint();
3646 
    // TODO: If we know we visited all incoming values, thus none are assumed
    //       dead, we can take the known information from the state T.
3649     return clampStateAndIndicateChange(getState(), T);
3650   }
3651 
3652   /// See AbstractAttribute::trackStatistics()
3653   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3654 };
3655 
3656 /// Align attribute for function return value.
3657 struct AAAlignReturned final
3658     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3659   AAAlignReturned(const IRPosition &IRP)
3660       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3661 
3662   /// See AbstractAttribute::trackStatistics()
3663   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3664 };
3665 
3666 /// Align attribute for function argument.
3667 struct AAAlignArgument final
3668     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3669                                                               AAAlignImpl> {
3670   using Base =
3671       AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3672                                                               AAAlignImpl>;
3673   AAAlignArgument(const IRPosition &IRP) : Base(IRP) {}
3674 
3675   /// See AbstractAttribute::manifest(...).
3676   ChangeStatus manifest(Attributor &A) override {
3677     // If the associated argument is involved in a must-tail call we give up
3678     // because we would need to keep the argument alignments of caller and
3679     // callee in-sync. Just does not seem worth the trouble right now.
3680     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3681       return ChangeStatus::UNCHANGED;
3682     return Base::manifest(A);
3683   }
3684 
3685   /// See AbstractAttribute::trackStatistics()
3686   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3687 };
3688 
3689 struct AAAlignCallSiteArgument final : AAAlignFloating {
3690   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3691 
3692   /// See AbstractAttribute::manifest(...).
3693   ChangeStatus manifest(Attributor &A) override {
3694     // If the associated argument is involved in a must-tail call we give up
3695     // because we would need to keep the argument alignments of caller and
3696     // callee in-sync. Just does not seem worth the trouble right now.
3697     if (Argument *Arg = getAssociatedArgument())
3698       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3699         return ChangeStatus::UNCHANGED;
3700     ChangeStatus Changed = AAAlignImpl::manifest(A);
3701     MaybeAlign InheritAlign =
3702         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3703     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3704       Changed = ChangeStatus::UNCHANGED;
3705     return Changed;
3706   }
3707 
3708   /// See AbstractAttribute::updateImpl(Attributor &A).
3709   ChangeStatus updateImpl(Attributor &A) override {
3710     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3711     if (Argument *Arg = getAssociatedArgument()) {
3712       // We only take known information from the argument
3713       // so we do not need to track a dependence.
3714       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3715           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3716       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3717     }
3718     return Changed;
3719   }
3720 
3721   /// See AbstractAttribute::trackStatistics()
3722   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3723 };
3724 
3725 /// Align attribute deduction for a call site return value.
3726 struct AAAlignCallSiteReturned final
3727     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3728                                                              AAAlignImpl> {
3729   using Base =
3730       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3731                                                              AAAlignImpl>;
3732   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3733 
3734   /// See AbstractAttribute::initialize(...).
3735   void initialize(Attributor &A) override {
3736     Base::initialize(A);
3737     Function *F = getAssociatedFunction();
3738     if (!F)
3739       indicatePessimisticFixpoint();
3740   }
3741 
3742   /// See AbstractAttribute::trackStatistics()
3743   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3744 };
3745 
3746 /// ------------------ Function No-Return Attribute ----------------------------
3747 struct AANoReturnImpl : public AANoReturn {
3748   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3749 
3750   /// See AbstractAttribute::initialize(...).
3751   void initialize(Attributor &A) override {
3752     AANoReturn::initialize(A);
3753     Function *F = getAssociatedFunction();
3754     if (!F)
3755       indicatePessimisticFixpoint();
3756   }
3757 
3758   /// See AbstractAttribute::getAsStr().
3759   const std::string getAsStr() const override {
3760     return getAssumed() ? "noreturn" : "may-return";
3761   }
3762 
3763   /// See AbstractAttribute::updateImpl(Attributor &A).
3764   virtual ChangeStatus updateImpl(Attributor &A) override {
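    // A function is assumed `noreturn` as long as no live `ret` instruction
    // is found; e.g. (illustrative), a function that ends in an infinite loop
    // or only calls `abort` has no live return.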
3765     auto CheckForNoReturn = [](Instruction &) { return false; };
3766     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3767                                    {(unsigned)Instruction::Ret}))
3768       return indicatePessimisticFixpoint();
3769     return ChangeStatus::UNCHANGED;
3770   }
3771 };
3772 
3773 struct AANoReturnFunction final : AANoReturnImpl {
3774   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3775 
3776   /// See AbstractAttribute::trackStatistics()
3777   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3778 };
3779 
/// NoReturn attribute deduction for a call site.
3781 struct AANoReturnCallSite final : AANoReturnImpl {
3782   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3783 
3784   /// See AbstractAttribute::updateImpl(...).
3785   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information. Then it makes sense to
    //       specialize attributes for call sites instead of redirecting
    //       requests to the callee.
3790     Function *F = getAssociatedFunction();
3791     const IRPosition &FnPos = IRPosition::function(*F);
3792     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3793     return clampStateAndIndicateChange(
3794         getState(),
3795         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3796   }
3797 
3798   /// See AbstractAttribute::trackStatistics()
3799   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3800 };
3801 
3802 /// ----------------------- Variable Capturing ---------------------------------
3803 
/// A class to hold the state for no-capture attributes.
3805 struct AANoCaptureImpl : public AANoCapture {
3806   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3807 
3808   /// See AbstractAttribute::initialize(...).
3809   void initialize(Attributor &A) override {
3810     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3811       indicateOptimisticFixpoint();
3812       return;
3813     }
3814     Function *AnchorScope = getAnchorScope();
3815     if (isFnInterfaceKind() &&
3816         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3817       indicatePessimisticFixpoint();
3818       return;
3819     }
3820 
3821     // You cannot "capture" null in the default address space.
3822     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3823         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3824       indicateOptimisticFixpoint();
3825       return;
3826     }
3827 
3828     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3829 
3830     // Check what state the associated function can actually capture.
3831     if (F)
3832       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3833     else
3834       indicatePessimisticFixpoint();
3835   }
3836 
3837   /// See AbstractAttribute::updateImpl(...).
3838   ChangeStatus updateImpl(Attributor &A) override;
3839 
3840   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3841   virtual void
3842   getDeducedAttributes(LLVMContext &Ctx,
3843                        SmallVectorImpl<Attribute> &Attrs) const override {
3844     if (!isAssumedNoCaptureMaybeReturned())
3845       return;
3846 
3847     if (getArgNo() >= 0) {
3848       if (isAssumedNoCapture())
3849         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3850       else if (ManifestInternal)
3851         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3852     }
3853   }
3854 
3855   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3856   /// depending on the ability of the function associated with \p IRP to capture
3857   /// state in memory and through "returning/throwing", respectively.
3858   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3859                                                    const Function &F,
3860                                                    BitIntegerState &State) {
3861     // TODO: Once we have memory behavior attributes we should use them here.
3862 
3863     // If we know we cannot communicate or write to memory, we do not care about
3864     // ptr2int anymore.
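    // E.g. (illustrative): a `readonly nounwind` function returning `void`
    // has no way to leak a pointer argument, not even through ptr2int.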
3865     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3866         F.getReturnType()->isVoidTy()) {
3867       State.addKnownBits(NO_CAPTURE);
3868       return;
3869     }
3870 
    // A function cannot capture state in memory if it only reads memory; it
    // can however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
3874     if (F.onlyReadsMemory())
3875       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3876 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
3879     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3880       State.addKnownBits(NOT_CAPTURED_IN_RET);
3881 
3882     // Check existing "returned" attributes.
3883     int ArgNo = IRP.getArgNo();
3884     if (F.doesNotThrow() && ArgNo >= 0) {
3885       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3886         if (F.hasParamAttribute(u, Attribute::Returned)) {
3887           if (u == unsigned(ArgNo))
3888             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3889           else if (F.onlyReadsMemory())
3890             State.addKnownBits(NO_CAPTURE);
3891           else
3892             State.addKnownBits(NOT_CAPTURED_IN_RET);
3893           break;
3894         }
3895     }
3896   }
3897 
3898   /// See AbstractState::getAsStr().
3899   const std::string getAsStr() const override {
3900     if (isKnownNoCapture())
3901       return "known not-captured";
3902     if (isAssumedNoCapture())
3903       return "assumed not-captured";
3904     if (isKnownNoCaptureMaybeReturned())
3905       return "known not-captured-maybe-returned";
3906     if (isAssumedNoCaptureMaybeReturned())
3907       return "assumed not-captured-maybe-returned";
3908     return "assumed-captured";
3909   }
3910 };
3911 
3912 /// Attributor-aware capture tracker.
3913 struct AACaptureUseTracker final : public CaptureTracker {
3914 
  /// Create a capture tracker that can look up in-flight abstract attributes
3916   /// through the Attributor \p A.
3917   ///
3918   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3919   /// search is stopped. If a use leads to a return instruction,
3920   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3921   /// If a use leads to a ptr2int which may capture the value,
3922   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3923   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3924   /// set. All values in \p PotentialCopies are later tracked as well. For every
3925   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3926   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3927   /// conservatively set to true.
3928   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3929                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3930                       SmallVectorImpl<const Value *> &PotentialCopies,
3931                       unsigned &RemainingUsesToExplore)
3932       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3933         PotentialCopies(PotentialCopies),
3934         RemainingUsesToExplore(RemainingUsesToExplore) {}
3935 
  /// Determine if \p V may be captured. *Also updates the state!*
3937   bool valueMayBeCaptured(const Value *V) {
3938     if (V->getType()->isPointerTy()) {
3939       PointerMayBeCaptured(V, this);
3940     } else {
3941       State.indicatePessimisticFixpoint();
3942     }
3943     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3944   }
3945 
3946   /// See CaptureTracker::tooManyUses().
3947   void tooManyUses() override {
3948     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3949   }
3950 
3951   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3952     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3953       return true;
3954     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3955         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3956         DepClassTy::OPTIONAL);
3957     return DerefAA.getAssumedDereferenceableBytes();
3958   }
3959 
3960   /// See CaptureTracker::captured(...).
3961   bool captured(const Use *U) override {
3962     Instruction *UInst = cast<Instruction>(U->getUser());
3963     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3964                       << "\n");
3965 
3966     // Because we may reuse the tracker multiple times we keep track of the
3967     // number of explored uses ourselves as well.
3968     if (RemainingUsesToExplore-- == 0) {
3969       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3970       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3971                           /* Return */ true);
3972     }
3973 
    // Deal with ptr2int conservatively, assuming the worst.
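    // E.g. (illustrative): `%i = ptrtoint i8* %p to i64` can smuggle the
    // pointer through an integer; since %i is not pointer typed,
    // valueMayBeCaptured below pessimistically fixes the state.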
3975     if (isa<PtrToIntInst>(UInst)) {
3976       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3977       return valueMayBeCaptured(UInst);
3978     }
3979 
3980     // Explicitly catch return instructions.
3981     if (isa<ReturnInst>(UInst))
3982       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3983                           /* Return */ true);
3984 
3985     // For now we only use special logic for call sites. However, the tracker
3986     // itself knows about a lot of other non-capturing cases already.
3987     auto *CB = dyn_cast<CallBase>(UInst);
3988     if (!CB || !CB->isArgOperand(U))
3989       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3990                           /* Return */ true);
3991 
3992     unsigned ArgNo = CB->getArgOperandNo(U);
3993     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
3996     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3997     if (ArgNoCaptureAA.isAssumedNoCapture())
3998       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3999                           /* Return */ false);
4000     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4001       addPotentialCopy(*CB);
4002       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4003                           /* Return */ false);
4004     }
4005 
    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // do not assume it.
4007     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4008                         /* Return */ true);
4009   }
4010 
  /// Register \p CB as a potential copy of the value we are checking.
4012   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4013 
4014   /// See CaptureTracker::shouldExplore(...).
4015   bool shouldExplore(const Use *U) override {
4016     // Check liveness and ignore droppable users.
4017     return !U->getUser()->isDroppable() &&
4018            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4019   }
4020 
4021   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4022   /// \p CapturedInRet, then return the appropriate value for use in the
4023   /// CaptureTracker::captured() interface.
4024   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4025                     bool CapturedInRet) {
4026     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4027                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4028     if (CapturedInMem)
4029       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4030     if (CapturedInInt)
4031       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4032     if (CapturedInRet)
4033       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4034     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4035   }
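
  // To make the tri-state concrete, a minimal IR sketch (illustrative only,
  // not taken from a test) of the three ways a pointer %ptr can be captured:
  //
  //   store i8* %ptr, i8** @g          ; captured in memory (@g is some
  //                                    ; hypothetical global pointer slot)
  //   %i = ptrtoint i8* %ptr to i64    ; captured in an integer
  //   ret i8* %ptr                     ; captured in the return value
  //
  // Each case clears the corresponding NOT_CAPTURED_IN_* bit above.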
4036 
4037 private:
4038   /// The attributor providing in-flight abstract attributes.
4039   Attributor &A;
4040 
4041   /// The abstract attribute currently updated.
4042   AANoCapture &NoCaptureAA;
4043 
4044   /// The abstract liveness state.
4045   const AAIsDead &IsDeadAA;
4046 
4047   /// The state currently updated.
4048   AANoCapture::StateType &State;
4049 
4050   /// Set of potential copies of the tracked value.
4051   SmallVectorImpl<const Value *> &PotentialCopies;
4052 
4053   /// Global counter to limit the number of explored uses.
4054   unsigned &RemainingUsesToExplore;
4055 };
4056 
4057 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4058   const IRPosition &IRP = getIRPosition();
4059   const Value *V =
4060       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4061   if (!V)
4062     return indicatePessimisticFixpoint();
4063 
4064   const Function *F =
4065       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4066   assert(F && "Expected a function!");
4067   const IRPosition &FnPos = IRPosition::function(*F);
4068   const auto &IsDeadAA =
4069       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4070 
4071   AANoCapture::StateType T;
4072 
4073   // Readonly means we cannot capture through memory.
4074   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4075       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4076   if (FnMemAA.isAssumedReadOnly()) {
4077     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4078     if (FnMemAA.isKnownReadOnly())
4079       addKnownBits(NOT_CAPTURED_IN_MEM);
4080   }
4081 
4082   // Make sure all returned values are different from the underlying value.
4083   // TODO: we could do this in a more sophisticated way inside
4084   //       AAReturnedValues, e.g., track all values that escape through returns
4085   //       directly somehow.
4086   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4087     bool SeenConstant = false;
4088     for (auto &It : RVAA.returned_values()) {
4089       if (isa<Constant>(It.first)) {
4090         if (SeenConstant)
4091           return false;
4092         SeenConstant = true;
4093       } else if (!isa<Argument>(It.first) ||
4094                  It.first == getAssociatedArgument())
4095         return false;
4096     }
4097     return true;
4098   };
4099 
4100   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4101       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4102   if (NoUnwindAA.isAssumedNoUnwind()) {
4103     bool IsVoidTy = F->getReturnType()->isVoidTy();
4104     const AAReturnedValues *RVAA =
4105         IsVoidTy ? nullptr
4106                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4107                                                  /* TrackDependence */ true,
4108                                                  DepClassTy::OPTIONAL);
4109     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4110       T.addKnownBits(NOT_CAPTURED_IN_RET);
4111       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4112         return ChangeStatus::UNCHANGED;
4113       if (NoUnwindAA.isKnownNoUnwind() &&
4114           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4115         addKnownBits(NOT_CAPTURED_IN_RET);
4116         if (isKnown(NOT_CAPTURED_IN_MEM))
4117           return indicateOptimisticFixpoint();
4118       }
4119     }
4120   }
4121 
4122   // Use the CaptureTracker interface and logic with the specialized tracker,
4123   // defined in AACaptureUseTracker, that can look at in-flight abstract
4124   // attributes and directly update the assumed state.
4125   SmallVector<const Value *, 4> PotentialCopies;
4126   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4127   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4128                               RemainingUsesToExplore);
4129 
4130   // Check all potential copies of the associated value until we can assume
4131   // none will be captured or we have to assume at least one might be.
4132   unsigned Idx = 0;
4133   PotentialCopies.push_back(V);
4134   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4135     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4136 
4137   AANoCapture::StateType &S = getState();
4138   auto Assumed = S.getAssumed();
4139   S.intersectAssumedBits(T.getAssumed());
4140   if (!isAssumedNoCaptureMaybeReturned())
4141     return indicatePessimisticFixpoint();
4142   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4143                                    : ChangeStatus::CHANGED;
4144 }
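
// For intuition, a hedged example of the deduction above (function and value
// names made up for illustration):
//
//   define i8 @load_only(i8* %p) {
//     %v = load i8, i8* %p
//     ret i8 %v
//   }
//
// The use tracker finds no capturing use of %p (the load reads through the
// pointer but neither stores, converts, nor returns it), so the assumed
// state stays NO_CAPTURE and manifest can mark %p 'nocapture'.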
4145 
4146 /// NoCapture attribute for function arguments.
4147 struct AANoCaptureArgument final : AANoCaptureImpl {
4148   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4149 
4150   /// See AbstractAttribute::trackStatistics()
4151   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4152 };
4153 
4154 /// NoCapture attribute for call site arguments.
4155 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4156   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4157 
4158   /// See AbstractAttribute::initialize(...).
4159   void initialize(Attributor &A) override {
4160     if (Argument *Arg = getAssociatedArgument())
4161       if (Arg->hasByValAttr())
4162         indicateOptimisticFixpoint();
4163     AANoCaptureImpl::initialize(A);
4164   }
4165 
4166   /// See AbstractAttribute::updateImpl(...).
4167   ChangeStatus updateImpl(Attributor &A) override {
4168     // TODO: Once we have call site specific value information we can provide
4169     //       call site specific liveness information and then it makes
4170     //       sense to specialize attributes for call site arguments instead of
4171     //       redirecting requests to the callee argument.
4172     Argument *Arg = getAssociatedArgument();
4173     if (!Arg)
4174       return indicatePessimisticFixpoint();
4175     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4176     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4177     return clampStateAndIndicateChange(
4178         getState(),
4179         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4180   }
4181 
4182   /// See AbstractAttribute::trackStatistics()
4183   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
4184 };
4185 
4186 /// NoCapture attribute for floating values.
4187 struct AANoCaptureFloating final : AANoCaptureImpl {
4188   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4189 
4190   /// See AbstractAttribute::trackStatistics()
4191   void trackStatistics() const override {
4192     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4193   }
4194 };
4195 
4196 /// NoCapture attribute for function return value.
4197 struct AANoCaptureReturned final : AANoCaptureImpl {
4198   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4199     llvm_unreachable("NoCapture is not applicable to function returns!");
4200   }
4201 
4202   /// See AbstractAttribute::initialize(...).
4203   void initialize(Attributor &A) override {
4204     llvm_unreachable("NoCapture is not applicable to function returns!");
4205   }
4206 
4207   /// See AbstractAttribute::updateImpl(...).
4208   ChangeStatus updateImpl(Attributor &A) override {
4209     llvm_unreachable("NoCapture is not applicable to function returns!");
4210   }
4211 
4212   /// See AbstractAttribute::trackStatistics()
4213   void trackStatistics() const override {}
4214 };
4215 
4216 /// NoCapture attribute deduction for a call site return value.
4217 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4218   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4219 
4220   /// See AbstractAttribute::trackStatistics()
4221   void trackStatistics() const override {
4222     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4223   }
4224 };
4225 
4226 /// ------------------ Value Simplify Attribute ----------------------------
4227 struct AAValueSimplifyImpl : AAValueSimplify {
4228   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4229 
4230   /// See AbstractAttribute::initialize(...).
4231   void initialize(Attributor &A) override {
4232     if (getAssociatedValue().getType()->isVoidTy())
4233       indicatePessimisticFixpoint();
4234   }
4235 
4236   /// See AbstractAttribute::getAsStr().
4237   const std::string getAsStr() const override {
4238     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4239                         : "not-simple";
4240   }
4241 
4242   /// See AbstractAttribute::trackStatistics()
4243   void trackStatistics() const override {}
4244 
4245   /// See AAValueSimplify::getAssumedSimplifiedValue()
4246   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4247     if (!getAssumed())
4248       return const_cast<Value *>(&getAssociatedValue());
4249     return SimplifiedAssociatedValue;
4250   }
4251 
4252   /// Helper function for querying AAValueSimplify and updating the candidate.
4253   /// \param QueryingValue Value trying to unify with the simplified value.
4254   /// \param AccumulatedSimplifiedValue Current simplification result.
4255   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4256                              Value &QueryingValue,
4257                              Optional<Value *> &AccumulatedSimplifiedValue) {
4258     // FIXME: Add typecast support.
4259 
4260     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4261         QueryingAA, IRPosition::value(QueryingValue));
4262 
4263     Optional<Value *> QueryingValueSimplified =
4264         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4265 
4266     if (!QueryingValueSimplified.hasValue())
4267       return true;
4268 
4269     if (!QueryingValueSimplified.getValue())
4270       return false;
4271 
4272     Value &QueryingValueSimplifiedUnwrapped =
4273         *QueryingValueSimplified.getValue();
4274 
4275     if (AccumulatedSimplifiedValue.hasValue() &&
4276         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4277         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4278       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4279     if (AccumulatedSimplifiedValue.hasValue() &&
4280         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4281       return true;
4282 
4283     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4284                       << " is assumed to be "
4285                       << QueryingValueSimplifiedUnwrapped << "\n");
4286 
4287     AccumulatedSimplifiedValue = QueryingValueSimplified;
4288     return true;
4289   }
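
  // A short worked example of the unification rules above (values chosen for
  // illustration): starting from an accumulated `undef`, a candidate `i32 7`
  // overwrites the accumulator since undef unifies with anything; starting
  // from `i32 7`, a candidate `undef` is absorbed and `i32 7` is kept; two
  // distinct non-undef candidates, e.g. `i32 7` and `i32 9`, fail to unify
  // and checkAndUpdate returns false.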
4290 
4291   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4292     if (!getAssociatedValue().getType()->isIntegerTy())
4293       return false;
4294 
4295     const auto &ValueConstantRangeAA =
4296         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4297 
4298     Optional<ConstantInt *> COpt =
4299         ValueConstantRangeAA.getAssumedConstantInt(A);
4300     if (COpt.hasValue()) {
4301       if (auto *C = COpt.getValue())
4302         SimplifiedAssociatedValue = C;
4303       else
4304         return false;
4305     } else {
4306       SimplifiedAssociatedValue = llvm::None;
4307     }
4308     return true;
4309   }
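
  // Sketch of the interplay with AAValueConstantRange (numbers assumed for
  // illustration): if the deduced range for an i32 value is the singleton
  // [42, 43), getAssumedConstantInt yields the ConstantInt 42 and it becomes
  // the simplified value; a wider range provides no unique constant and no
  // simplification is recorded.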
4310 
4311   /// See AbstractAttribute::manifest(...).
4312   ChangeStatus manifest(Attributor &A) override {
4313     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4314 
4315     if (SimplifiedAssociatedValue.hasValue() &&
4316         !SimplifiedAssociatedValue.getValue())
4317       return Changed;
4318 
4319     Value &V = getAssociatedValue();
4320     auto *C = SimplifiedAssociatedValue.hasValue()
4321                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4322                   : UndefValue::get(V.getType());
4323     if (C) {
4324       // We can replace the AssociatedValue with the constant.
4325       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4326         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4327                           << " :: " << *this << "\n");
4328         if (A.changeValueAfterManifest(V, *C))
4329           Changed = ChangeStatus::CHANGED;
4330       }
4331     }
4332 
4333     return Changed | AAValueSimplify::manifest(A);
4334   }
4335 
4336   /// See AbstractState::indicatePessimisticFixpoint(...).
4337   ChangeStatus indicatePessimisticFixpoint() override {
4338     // NOTE: The associated value will be returned in a pessimistic fixpoint
4339     // and is regarded as known. That's why `indicateOptimisticFixpoint` is called.
4340     SimplifiedAssociatedValue = &getAssociatedValue();
4341     indicateOptimisticFixpoint();
4342     return ChangeStatus::CHANGED;
4343   }
4344 
4345 protected:
4346   // An assumed simplified value. Initially, it is set to Optional::None, which
4347   // means that the value is not clear under the current assumption. In the
4348   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4349   // the original associated value.
4350   Optional<Value *> SimplifiedAssociatedValue;
4351 };
4352 
4353 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4354   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4355 
4356   void initialize(Attributor &A) override {
4357     AAValueSimplifyImpl::initialize(A);
4358     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4359       indicatePessimisticFixpoint();
4360     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4361                 /* IgnoreSubsumingPositions */ true))
4362       indicatePessimisticFixpoint();
4363 
4364     // FIXME: This is a hack to prevent us from propagating function pointers
4365     // in the new pass manager CGSCC pass as it creates call edges the
4366     // CallGraphUpdater cannot handle yet.
4367     Value &V = getAssociatedValue();
4368     if (V.getType()->isPointerTy() &&
4369         V.getType()->getPointerElementType()->isFunctionTy() &&
4370         !A.isModulePass())
4371       indicatePessimisticFixpoint();
4372   }
4373 
4374   /// See AbstractAttribute::updateImpl(...).
4375   ChangeStatus updateImpl(Attributor &A) override {
4376     // Byval is only replaceable if it is readonly, otherwise we would write
4377     // into the replaced value and not the copy that byval creates implicitly.
4378     Argument *Arg = getAssociatedArgument();
4379     if (Arg->hasByValAttr()) {
4380       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4381       //       there is no race by not copying a constant byval.
4382       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4383       if (!MemAA.isAssumedReadOnly())
4384         return indicatePessimisticFixpoint();
4385     }
4386 
4387     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4388 
4389     auto PredForCallSite = [&](AbstractCallSite ACS) {
4390       const IRPosition &ACSArgPos =
4391           IRPosition::callsite_argument(ACS, getArgNo());
4392       // Check if a corresponding argument was found or if it is one not
4393       // associated (which can happen for callback calls).
4394       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4395         return false;
4396 
4397       // We can only propagate thread independent values through callbacks.
4398       // This is different to direct/indirect call sites because for them we
4399       // know the thread executing the caller and callee is the same. For
4400       // callbacks this is not guaranteed, thus a thread dependent value could
4401       // be different for the caller and callee, making it invalid to propagate.
4402       Value &ArgOp = ACSArgPos.getAssociatedValue();
4403       if (ACS.isCallbackCall())
4404         if (auto *C = dyn_cast<Constant>(&ArgOp))
4405           if (C->isThreadDependent())
4406             return false;
4407       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4408     };
4409 
4410     bool AllCallSitesKnown;
4411     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4412                                 AllCallSitesKnown))
4413       if (!askSimplifiedValueForAAValueConstantRange(A))
4414         return indicatePessimisticFixpoint();
4415 
4416     // If a candidate was found in this update, return CHANGED.
4417     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4418                ? ChangeStatus::UNCHANGED
4419                : ChangeStatus::CHANGED;
4420   }
4421 
4422   /// See AbstractAttribute::trackStatistics()
4423   void trackStatistics() const override {
4424     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4425   }
4426 };
4427 
4428 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4429   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4430 
4431   /// See AbstractAttribute::updateImpl(...).
4432   ChangeStatus updateImpl(Attributor &A) override {
4433     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4434 
4435     auto PredForReturned = [&](Value &V) {
4436       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4437     };
4438 
4439     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4440       if (!askSimplifiedValueForAAValueConstantRange(A))
4441         return indicatePessimisticFixpoint();
4442 
4443     // If a candidate was found in this update, return CHANGED.
4444     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4445                ? ChangeStatus::UNCHANGED
4446                : ChangeStatus::CHANGED;
4447   }
4448 
4449   ChangeStatus manifest(Attributor &A) override {
4450     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4451 
4452     if (SimplifiedAssociatedValue.hasValue() &&
4453         !SimplifiedAssociatedValue.getValue())
4454       return Changed;
4455 
4456     Value &V = getAssociatedValue();
4457     auto *C = SimplifiedAssociatedValue.hasValue()
4458                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4459                   : UndefValue::get(V.getType());
4460     if (C) {
4461       auto PredForReturned =
4462           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4463             // We can replace the AssociatedValue with the constant.
4464             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4465               return true;
4466 
4467             for (ReturnInst *RI : RetInsts) {
4468               if (RI->getFunction() != getAnchorScope())
4469                 continue;
4470               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4471                                 << " in " << *RI << " :: " << *this << "\n");
4472               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4473                 Changed = ChangeStatus::CHANGED;
4474             }
4475             return true;
4476           };
4477       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4478     }
4479 
4480     return Changed | AAValueSimplify::manifest(A);
4481   }
4482 
4483   /// See AbstractAttribute::trackStatistics()
4484   void trackStatistics() const override {
4485     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4486   }
4487 };
4488 
4489 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4490   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4491 
4492   /// See AbstractAttribute::initialize(...).
4493   void initialize(Attributor &A) override {
4494     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4495     //        Needs investigation.
4496     // AAValueSimplifyImpl::initialize(A);
4497     Value &V = getAnchorValue();
4498 
4499     // TODO: Handle other cases as well.
4500     if (isa<Constant>(V))
4501       indicatePessimisticFixpoint();
4502   }
4503 
4504   /// See AbstractAttribute::updateImpl(...).
4505   ChangeStatus updateImpl(Attributor &A) override {
4506     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4507 
4508     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4509                             bool Stripped) -> bool {
4510       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4511       if (!Stripped && this == &AA) {
4512         // TODO: Look at the instruction and check recursively.
4513 
4514         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4515                           << "\n");
4516         return false;
4517       }
4518       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4519     };
4520 
4521     bool Dummy = false;
4522     if (!genericValueTraversal<AAValueSimplify, bool>(
4523             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4524       if (!askSimplifiedValueForAAValueConstantRange(A))
4525         return indicatePessimisticFixpoint();
4526 
4527     // If a candidate was found in this update, return CHANGED.
4529     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4530                ? ChangeStatus::UNCHANGED
4531                : ChangeStatus::CHANGED;
4532   }
4533 
4534   /// See AbstractAttribute::trackStatistics()
4535   void trackStatistics() const override {
4536     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4537   }
4538 };
4539 
4540 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4541   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4542 
4543   /// See AbstractAttribute::initialize(...).
4544   void initialize(Attributor &A) override {
4545     SimplifiedAssociatedValue = &getAnchorValue();
4546     indicateOptimisticFixpoint();
4547   }
4548   /// See AbstractAttribute::updateImpl(...).
4549   ChangeStatus updateImpl(Attributor &A) override {
4550     llvm_unreachable(
4551         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4552   }
4553   /// See AbstractAttribute::trackStatistics()
4554   void trackStatistics() const override {
4555     STATS_DECLTRACK_FN_ATTR(value_simplify)
4556   }
4557 };
4558 
4559 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4560   AAValueSimplifyCallSite(const IRPosition &IRP)
4561       : AAValueSimplifyFunction(IRP) {}
4562   /// See AbstractAttribute::trackStatistics()
4563   void trackStatistics() const override {
4564     STATS_DECLTRACK_CS_ATTR(value_simplify)
4565   }
4566 };
4567 
4568 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4569   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4570       : AAValueSimplifyReturned(IRP) {}
4571 
4572   /// See AbstractAttribute::manifest(...).
4573   ChangeStatus manifest(Attributor &A) override {
4574     return AAValueSimplifyImpl::manifest(A);
4575   }
4576 
4577   void trackStatistics() const override {
4578     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4579   }
4580 };
4581 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4582   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4583       : AAValueSimplifyFloating(IRP) {}
4584 
4585   void trackStatistics() const override {
4586     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4587   }
4588 };
4589 
4590 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4591 struct AAHeapToStackImpl : public AAHeapToStack {
4592   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4593 
4594   const std::string getAsStr() const override {
4595     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4596   }
4597 
4598   ChangeStatus manifest(Attributor &A) override {
4599     assert(getState().isValidState() &&
4600            "Attempted to manifest an invalid state!");
4601 
4602     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4603     Function *F = getAnchorScope();
4604     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4605 
4606     for (Instruction *MallocCall : MallocCalls) {
4607       // This malloc cannot be replaced.
4608       if (BadMallocCalls.count(MallocCall))
4609         continue;
4610 
4611       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4612         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4613         A.deleteAfterManifest(*FreeCall);
4614         HasChanged = ChangeStatus::CHANGED;
4615       }
4616 
4617       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4618                         << "\n");
4619 
4620       MaybeAlign Alignment;
4621       Constant *Size;
4622       if (isCallocLikeFn(MallocCall, TLI)) {
4623         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4624         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4625         APInt TotalSize = SizeT->getValue() * Num->getValue();
4626         Size =
4627             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4628       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4629         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4630         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4631                                    ->getValue()
4632                                    .getZExtValue());
4633       } else {
4634         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4635       }
4636 
4637       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4638       Instruction *AI =
4639           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4640                          "", MallocCall->getNextNode());
4641 
4642       if (AI->getType() != MallocCall->getType())
4643         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4644                              AI->getNextNode());
4645 
4646       A.changeValueAfterManifest(*MallocCall, *AI);
4647 
4648       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4649         auto *NBB = II->getNormalDest();
4650         BranchInst::Create(NBB, MallocCall->getParent());
4651         A.deleteAfterManifest(*MallocCall);
4652       } else {
4653         A.deleteAfterManifest(*MallocCall);
4654       }
4655 
4656       // Zero out the allocated memory if it was a calloc.
4657       if (isCallocLikeFn(MallocCall, TLI)) {
4658         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4659                                    AI->getNextNode());
4660         Value *Ops[] = {
4661             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4662             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4663 
4664         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4665         Module *M = F->getParent();
4666         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4667         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4668       }
4669       HasChanged = ChangeStatus::CHANGED;
4670     }
4671 
4672     return HasChanged;
4673   }
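
  // End-to-end, the manifest above performs a rewrite along these lines
  // (illustrative IR; size and names made up):
  //
  //   %m = call i8* @malloc(i64 16)          ; before
  //   ...
  //   call void @free(i8* %m)
  //
  //   %m = alloca i8, i64 16                 ; after: the allocation lives on
  //   ...                                    ; the stack and all known free
  //                                          ; calls are deleted
  //
  // For calloc-like calls the new alloca is additionally zero-initialized
  // with a memset intrinsic, as implemented above.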
4674 
4675   /// Collection of all malloc calls in a function.
4676   SmallSetVector<Instruction *, 4> MallocCalls;
4677 
4678   /// Collection of malloc calls that cannot be converted.
4679   DenseSet<const Instruction *> BadMallocCalls;
4680 
4681   /// A map for each malloc call to the set of associated free calls.
4682   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4683 
4684   ChangeStatus updateImpl(Attributor &A) override;
4685 };
4686 
4687 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4688   const Function *F = getAnchorScope();
4689   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4690 
4691   MustBeExecutedContextExplorer &Explorer =
4692       A.getInfoCache().getMustBeExecutedContextExplorer();
4693 
4694   auto FreeCheck = [&](Instruction &I) {
4695     const auto &Frees = FreesForMalloc.lookup(&I);
4696     if (Frees.size() != 1)
4697       return false;
4698     Instruction *UniqueFree = *Frees.begin();
4699     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4700   };
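
  // FreeCheck admits, e.g., a malloc whose unique free call is guaranteed to
  // execute whenever the malloc does (such as an unconditional free later in
  // the same basic block); it serves as an alternative to the use-based
  // UsesCheck below.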
4701 
4702   auto UsesCheck = [&](Instruction &I) {
4703     bool ValidUsesOnly = true;
4704     bool MustUse = true;
4705     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4706       Instruction *UserI = cast<Instruction>(U.getUser());
4707       if (isa<LoadInst>(UserI))
4708         return true;
4709       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4710         if (SI->getValueOperand() == U.get()) {
4711           LLVM_DEBUG(dbgs()
4712                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4713           ValidUsesOnly = false;
4714         } else {
4715           // A store into the malloc'ed memory is fine.
4716         }
4717         return true;
4718       }
4719       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4720         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4721           return true;
4722         // Record the free call for this malloc.
4723         if (isFreeCall(UserI, TLI)) {
4724           if (MustUse) {
4725             FreesForMalloc[&I].insert(UserI);
4726           } else {
4727             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4728                               << *UserI << "\n");
4729             ValidUsesOnly = false;
4730           }
4731           return true;
4732         }
4733 
4734         unsigned ArgNo = CB->getArgOperandNo(&U);
4735 
4736         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4737             *this, IRPosition::callsite_argument(*CB, ArgNo));
4738 
4739         // If a callsite argument use is nofree, we are fine.
4740         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4741             *this, IRPosition::callsite_argument(*CB, ArgNo));
4742 
4743         if (!NoCaptureAA.isAssumedNoCapture() ||
4744             !ArgNoFreeAA.isAssumedNoFree()) {
4745           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4746           ValidUsesOnly = false;
4747         }
4748         return true;
4749       }
4750 
4751       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4752           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4753         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4754         Follow = true;
4755         return true;
4756       }
4757       // Unknown user for which we cannot track uses further (in a way that
4758       // makes sense).
4759       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4760       ValidUsesOnly = false;
4761       return true;
4762     };
4763     A.checkForAllUses(Pred, *this, I);
4764     return ValidUsesOnly;
4765   };
4766 
4767   auto MallocCallocCheck = [&](Instruction &I) {
4768     if (BadMallocCalls.count(&I))
4769       return true;
4770 
4771     bool IsMalloc = isMallocLikeFn(&I, TLI);
4772     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4773     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4774     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4775       BadMallocCalls.insert(&I);
4776       return true;
4777     }
4778 
4779     if (IsMalloc) {
4780       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4781         if (Size->getValue().ule(MaxHeapToStackSize))
4782           if (UsesCheck(I) || FreeCheck(I)) {
4783             MallocCalls.insert(&I);
4784             return true;
4785           }
4786     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4787       // Only if the alignment and size are constant.
4788       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4789         if (Size->getValue().ule(MaxHeapToStackSize))
4790           if (UsesCheck(I) || FreeCheck(I)) {
4791             MallocCalls.insert(&I);
4792             return true;
4793           }
4794     } else if (IsCalloc) {
4795       bool Overflow = false;
4796       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4797         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4798           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4799                   .ule(MaxHeapToStackSize))
4800             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4801               MallocCalls.insert(&I);
4802               return true;
4803             }
4804     }
4805 
4806     BadMallocCalls.insert(&I);
4807     return true;
4808   };
4809 
4810   size_t NumBadMallocs = BadMallocCalls.size();
4811 
4812   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4813 
4814   if (NumBadMallocs != BadMallocCalls.size())
4815     return ChangeStatus::CHANGED;
4816 
4817   return ChangeStatus::UNCHANGED;
4818 }
4819 
4820 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4821   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4822 
4823   /// See AbstractAttribute::trackStatistics().
4824   void trackStatistics() const override {
4825     STATS_DECL(
4826         MallocCalls, Function,
4827         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4828     for (auto *C : MallocCalls)
4829       if (!BadMallocCalls.count(C))
4830         ++BUILD_STAT_NAME(MallocCalls, Function);
4831   }
4832 };
4833 
4834 /// ----------------------- Privatizable Pointers ------------------------------
4835 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4836   AAPrivatizablePtrImpl(const IRPosition &IRP)
4837       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
4838 
4839   ChangeStatus indicatePessimisticFixpoint() override {
4840     AAPrivatizablePtr::indicatePessimisticFixpoint();
4841     PrivatizableType = nullptr;
4842     return ChangeStatus::CHANGED;
4843   }
4844 
4845   /// Identify the type we can choose for a private copy of the underlying
4846   /// argument. None means it is not clear yet, nullptr means there is none.
4847   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4848 
4849   /// Return a privatizable type that encloses both T0 and T1.
4850   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4851   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4852     if (!T0.hasValue())
4853       return T1;
4854     if (!T1.hasValue())
4855       return T0;
4856     if (T0 == T1)
4857       return T0;
4858     return nullptr;
4859   }
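
  // E.g., combineTypes(None, i32) == i32, combineTypes(i32, i32) == i32, and
  // combineTypes(i32, i64) == nullptr (no common privatizable type).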
4860 
4861   Optional<Type *> getPrivatizableType() const override {
4862     return PrivatizableType;
4863   }
4864 
4865   const std::string getAsStr() const override {
4866     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4867   }
4868 
4869 protected:
4870   Optional<Type *> PrivatizableType;
4871 };
4872 
4873 // TODO: Do this for call site arguments (probably also other values) as well.
4874 
4875 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4876   AAPrivatizablePtrArgument(const IRPosition &IRP)
4877       : AAPrivatizablePtrImpl(IRP) {}
4878 
4879   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4880   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4881     // If this is a byval argument and we know all the call sites (so we can
4882     // rewrite them), there is no need to check them explicitly.
4883     bool AllCallSitesKnown;
4884     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4885         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4886                                true, AllCallSitesKnown))
4887       return getAssociatedValue().getType()->getPointerElementType();
4888 
4889     Optional<Type *> Ty;
4890     unsigned ArgNo = getIRPosition().getArgNo();
4891 
4892     // Make sure the associated call site argument has the same type at all call
4893     // sites and it is an allocation we know is safe to privatize, for now that
4894     // means we only allow alloca instructions.
4895     // TODO: We can additionally analyze the accesses in the callee to create
4896     //       the type from that information instead. That is a little more
4897     //       involved and will be done in a follow-up patch.
4898     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4899       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4900       // Check if a corresponding argument was found or if it is one not
4901       // associated (which can happen for callback calls).
4902       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4903         return false;
4904 
4905       // Check that all call sites agree on a type.
4906       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4907       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4908 
4909       LLVM_DEBUG({
4910         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4911         if (CSTy.hasValue() && CSTy.getValue())
4912           CSTy.getValue()->print(dbgs());
4913         else if (CSTy.hasValue())
4914           dbgs() << "<nullptr>";
4915         else
4916           dbgs() << "<none>";
4917       });
4918 
4919       Ty = combineTypes(Ty, CSTy);
4920 
4921       LLVM_DEBUG({
4922         dbgs() << " : New Type: ";
4923         if (Ty.hasValue() && Ty.getValue())
4924           Ty.getValue()->print(dbgs());
4925         else if (Ty.hasValue())
4926           dbgs() << "<nullptr>";
4927         else
4928           dbgs() << "<none>";
4929         dbgs() << "\n";
4930       });
4931 
4932       return !Ty.hasValue() || Ty.getValue();
4933     };
4934 
4935     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4936       return nullptr;
4937     return Ty;
4938   }
4939 
4940   /// See AbstractAttribute::updateImpl(...).
4941   ChangeStatus updateImpl(Attributor &A) override {
4942     PrivatizableType = identifyPrivatizableType(A);
4943     if (!PrivatizableType.hasValue())
4944       return ChangeStatus::UNCHANGED;
4945     if (!PrivatizableType.getValue())
4946       return indicatePessimisticFixpoint();
4947 
4948     // Avoid arguments with padding for now.
4949     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
4950         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
4951                                                 A.getInfoCache().getDL())) {
4952       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
4953       return indicatePessimisticFixpoint();
4954     }
4955 
4956     // Verify callee and caller agree on how the promoted argument would be
4957     // passed.
4958     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
4959     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
4960     // which doesn't require the arguments ArgumentPromotion wanted to pass.
4961     Function &Fn = *getIRPosition().getAnchorScope();
4962     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
4963     ArgsToPromote.insert(getAssociatedArgument());
4964     const auto *TTI =
4965         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
4966     if (!TTI ||
4967         !ArgumentPromotionPass::areFunctionArgsABICompatible(
4968             Fn, *TTI, ArgsToPromote, Dummy) ||
4969         ArgsToPromote.empty()) {
4970       LLVM_DEBUG(
4971           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
4972                  << Fn.getName() << "\n");
4973       return indicatePessimisticFixpoint();
4974     }
4975 
4976     // Collect the types that will replace the privatizable type in the function
4977     // signature.
4978     SmallVector<Type *, 16> ReplacementTypes;
4979     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
4980 
4981     // Register a rewrite of the argument.
4982     Argument *Arg = getAssociatedArgument();
4983     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
4984       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
4985       return indicatePessimisticFixpoint();
4986     }
4987 
4988     unsigned ArgNo = Arg->getArgNo();
4989 
4990     // Helper to check if, for the given call site, the associated argument is
4991     // passed to a callback where the privatization would be different.
4992     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
4993       SmallVector<const Use *, 4> CallbackUses;
4994       AbstractCallSite::getCallbackUses(CB, CallbackUses);
4995       for (const Use *U : CallbackUses) {
4996         AbstractCallSite CBACS(U);
4997         assert(CBACS && CBACS.isCallbackCall());
4998         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
4999           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5000 
5001           LLVM_DEBUG({
5002             dbgs()
5003                 << "[AAPrivatizablePtr] Argument " << *Arg
5004                 << " check if it can be privatized in the context of its parent ("
5005                 << Arg->getParent()->getName()
5006                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5007                    "callback ("
5008                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5009                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5010                 << CBACS.getCallArgOperand(CBArg) << " vs "
5011                 << CB.getArgOperand(ArgNo) << "\n"
5012                 << "[AAPrivatizablePtr] " << CBArg << " : "
5013                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5014           });
5015 
5016           if (CBArgNo != int(ArgNo))
5017             continue;
5018           const auto &CBArgPrivAA =
5019               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5020           if (CBArgPrivAA.isValidState()) {
5021             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5022             if (!CBArgPrivTy.hasValue())
5023               continue;
5024             if (CBArgPrivTy.getValue() == PrivatizableType)
5025               continue;
5026           }
5027 
5028           LLVM_DEBUG({
5029             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5030                    << " cannot be privatized in the context of its parent ("
5031                    << Arg->getParent()->getName()
5032                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5033                       "callback ("
5034                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5035                    << ").\n[AAPrivatizablePtr] for which the argument "
5036                       "privatization is not compatible.\n";
5037           });
5038           return false;
5039         }
5040       }
5041       return true;
5042     };
5043 
5044     // Helper to check if, for the given call site, the associated argument is
5045     // passed to a direct call where the privatization would be different.
5046     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5047       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5048       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5049       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5050              "Expected a direct call operand for callback call operand");
5051 
5052       LLVM_DEBUG({
5053         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5054                << " check if it can be privatized in the context of its parent ("
5055                << Arg->getParent()->getName()
5056                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5057                   "direct call of ("
5058                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5059                << ").\n";
5060       });
5061 
5062       Function *DCCallee = DC->getCalledFunction();
5063       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5064         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5065             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5066         if (DCArgPrivAA.isValidState()) {
5067           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5068           if (!DCArgPrivTy.hasValue())
5069             return true;
5070           if (DCArgPrivTy.getValue() == PrivatizableType)
5071             return true;
5072         }
5073       }
5074 
5075       LLVM_DEBUG({
5076         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5077                << " cannot be privatized in the context of its parent ("
5078                << Arg->getParent()->getName()
5079                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5080                   "direct call of ("
5081                << ACS.getInstruction()->getCalledFunction()->getName()
5082                << ").\n[AAPrivatizablePtr] for which the argument "
5083                   "privatization is not compatible.\n";
5084       });
5085       return false;
5086     };
5087 
5088     // Helper to check if the associated argument is used at the given abstract
5089     // call site in a way that is incompatible with the privatization assumed
5090     // here.
5091     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5092       if (ACS.isDirectCall())
5093         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5094       if (ACS.isCallbackCall())
5095         return IsCompatiblePrivArgOfDirectCS(ACS);
5096       return false;
5097     };
5098 
5099     bool AllCallSitesKnown;
5100     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5101                                 AllCallSitesKnown))
5102       return indicatePessimisticFixpoint();
5103 
5104     return ChangeStatus::UNCHANGED;
5105   }
5106 
5107   /// Given a type to privatize, \p PrivType, collect the constituent types
5108   /// (which are used) in \p ReplacementTypes.
5109   static void
5110   identifyReplacementTypes(Type *PrivType,
5111                            SmallVectorImpl<Type *> &ReplacementTypes) {
5112     // TODO: For now we expand the privatization type to the fullest which can
5113     //       lead to dead arguments that need to be removed later.
5114     assert(PrivType && "Expected privatizable type!");
5115 
5116     // Traverse the type, extract constituent types on the outermost level.
5117     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5118       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5119         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5120     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5121       ReplacementTypes.append(PrivArrayType->getNumElements(),
5122                               PrivArrayType->getElementType());
5123     } else {
5124       ReplacementTypes.push_back(PrivType);
5125     }
5126   }
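
  // For example (types chosen for illustration): privatizing `{i32, i64}`
  // yields the replacement types [i32, i64], privatizing `[4 x float]` yields
  // four `float` entries, and a scalar `i32` stays a single entry.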
5127 
5128   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5129   /// The values needed are taken from the arguments of \p F starting at
5130   /// position \p ArgNo.
5131   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5132                                    unsigned ArgNo, Instruction &IP) {
5133     assert(PrivType && "Expected privatizable type!");
5134 
5135     IRBuilder<NoFolder> IRB(&IP);
5136     const DataLayout &DL = F.getParent()->getDataLayout();
5137 
5138     // Traverse the type, build GEPs and stores.
5139     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5140       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5141       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5142         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5143         Value *Ptr = constructPointer(
5144             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5145         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5146       }
5147     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5148       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5149       uint64_t PointeeTySize = DL.getTypeStoreSize(PrivArrayType->getElementType());
5150       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5151         Value *Ptr =
5152             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5153         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5154       }
5155     } else {
5156       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5157     }
5158   }
5159 
5160   /// Extract values from \p Base according to the type \p PrivType at the
5161   /// call position \p ACS. The values are appended to \p ReplacementValues.
5162   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5163                                Value *Base,
5164                                SmallVectorImpl<Value *> &ReplacementValues) {
5165     assert(Base && "Expected base value!");
5166     assert(PrivType && "Expected privatizable type!");
5167     Instruction *IP = ACS.getInstruction();
5168 
5169     IRBuilder<NoFolder> IRB(IP);
5170     const DataLayout &DL = IP->getModule()->getDataLayout();
5171 
5172     if (Base->getType()->getPointerElementType() != PrivType)
5173       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5174                                                  "", ACS.getInstruction());
5175 
5176     // TODO: Improve the alignment of the loads.
5177     // Traverse the type, build GEPs and loads.
5178     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5179       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5180       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5181         Type *PointeeTy = PrivStructType->getElementType(u);
5182         Value *Ptr =
5183             constructPointer(PointeeTy->getPointerTo(), Base,
5184                              PrivStructLayout->getElementOffset(u), IRB, DL);
5185         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5186         L->setAlignment(Align(1));
5187         ReplacementValues.push_back(L);
5188       }
5189     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5190       Type *PointeeTy = PrivArrayType->getElementType();
5191       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5192       Type *PointeePtrTy = PointeeTy->getPointerTo();
5193       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5194         Value *Ptr =
5195             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5196         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5197         L->setAlignment(Align(1));
5198         ReplacementValues.push_back(L);
5199       }
5200     } else {
5201       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5202       L->setAlignment(Align(1));
5203       ReplacementValues.push_back(L);
5204     }
5205   }
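
  // Sketch for PrivType == {i32, i64} (offsets follow the module's data
  // layout; value names are made up): at the call site this emits roughly
  //
  //   %f0.ptr = ...gep to byte offset 0 of the base pointer...
  //   %f0 = load i32, i32* %f0.ptr, align 1
  //   %f1.ptr = ...gep to the offset of the second element...
  //   %f1 = load i64, i64* %f1.ptr, align 1
  //
  // and appends %f0 and %f1 to ReplacementValues.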
5206 
5207   /// See AbstractAttribute::manifest(...)
5208   ChangeStatus manifest(Attributor &A) override {
5209     if (!PrivatizableType.hasValue())
5210       return ChangeStatus::UNCHANGED;
5211     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5212 
5213     // Collect all tail calls in the function as we cannot allow new allocas to
5214     // escape into tail recursion.
5215     // TODO: Be smarter about new allocas escaping into tail calls.
5216     SmallVector<CallInst *, 16> TailCalls;
5217     if (!A.checkForAllInstructions(
5218             [&](Instruction &I) {
5219               CallInst &CI = cast<CallInst>(I);
5220               if (CI.isTailCall())
5221                 TailCalls.push_back(&CI);
5222               return true;
5223             },
5224             *this, {Instruction::Call}))
5225       return ChangeStatus::UNCHANGED;
5226 
5227     Argument *Arg = getAssociatedArgument();
5228 
5229     // Callback to repair the associated function. A new alloca is placed at the
5230     // beginning and initialized with the values passed through arguments. The
5231     // new alloca replaces the use of the old pointer argument.
5232     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5233         [=](const Attributor::ArgumentReplacementInfo &ARI,
5234             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5235           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5236           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5237           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5238                                     Arg->getName() + ".priv", IP);
5239           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5240                                ArgIt->getArgNo(), *IP);
5241           Arg->replaceAllUsesWith(AI);
5242 
5243           for (CallInst *CI : TailCalls)
5244             CI->setTailCall(false);
5245         };
5246 
5247     // Callback to repair a call site of the associated function. The elements
5248     // of the privatizable type are loaded prior to the call and passed to the
5249     // new function version.
5250     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5251         [=](const Attributor::ArgumentReplacementInfo &ARI,
5252             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5253           createReplacementValues(
5254               PrivatizableType.getValue(), ACS,
5255               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5256               NewArgOperands);
5257         };
5258 
5259     // Collect the types that will replace the privatizable type in the function
5260     // signature.
5261     SmallVector<Type *, 16> ReplacementTypes;
5262     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5263 
5264     // Register a rewrite of the argument.
5265     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5266                                            std::move(FnRepairCB),
5267                                            std::move(ACSRepairCB)))
5268       return ChangeStatus::CHANGED;
5269     return ChangeStatus::UNCHANGED;
5270   }
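
  // Taken together, the two callbacks implement a rewrite of roughly this
  // shape for a privatizable i32* argument (hedged sketch, names made up):
  //
  //   define void @f(i32* %p) { ... }        ; before
  //   call void @f(i32* %q)
  //
  //   define void @f(i32 %p.val) {           ; after: the callee creates a
  //     %p.priv = alloca i32                 ; private copy initialized from
  //     store i32 %p.val, i32* %p.priv       ; the new value argument
  //     ... uses of %p now use %p.priv ...
  //   }
  //   %q.val = load i32, i32* %q             ; the call site loads the
  //   call void @f(i32 %q.val)               ; element(s) and passes them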
5271 
5272   /// See AbstractAttribute::trackStatistics()
5273   void trackStatistics() const override {
5274     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5275   }
5276 };
5277 
5278 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5279   AAPrivatizablePtrFloating(const IRPosition &IRP)
5280       : AAPrivatizablePtrImpl(IRP) {}
5281 
5282   /// See AbstractAttribute::initialize(...).
5283   virtual void initialize(Attributor &A) override {
5284     // TODO: We can privatize more than arguments.
5285     indicatePessimisticFixpoint();
5286   }
5287 
5288   ChangeStatus updateImpl(Attributor &A) override {
5289     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5290                      "updateImpl will not be called");
5291   }
5292 
5293   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5294   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5295     Value *Obj =
5296         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5297     if (!Obj) {
5298       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5299       return nullptr;
5300     }
5301 
5302     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5303       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5304         if (CI->isOne())
5305           return Obj->getType()->getPointerElementType();
5306     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5307       auto &PrivArgAA =
5308           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5309       if (PrivArgAA.isAssumedPrivatizablePtr())
5310         return Obj->getType()->getPointerElementType();
5311     }
5312 
5313     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5314                          "alloca nor privatizable argument: "
5315                       << *Obj << "!\n");
5316     return nullptr;
5317   }
5318 
5319   /// See AbstractAttribute::trackStatistics()
5320   void trackStatistics() const override {
5321     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5322   }
5323 };
5324 
5325 struct AAPrivatizablePtrCallSiteArgument final
5326     : public AAPrivatizablePtrFloating {
5327   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
5328       : AAPrivatizablePtrFloating(IRP) {}
5329 
5330   /// See AbstractAttribute::initialize(...).
5331   void initialize(Attributor &A) override {
5332     if (getIRPosition().hasAttr(Attribute::ByVal))
5333       indicateOptimisticFixpoint();
5334   }
5335 
5336   /// See AbstractAttribute::updateImpl(...).
5337   ChangeStatus updateImpl(Attributor &A) override {
5338     PrivatizableType = identifyPrivatizableType(A);
5339     if (!PrivatizableType.hasValue())
5340       return ChangeStatus::UNCHANGED;
5341     if (!PrivatizableType.getValue())
5342       return indicatePessimisticFixpoint();
5343 
5344     const IRPosition &IRP = getIRPosition();
5345     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5346     if (!NoCaptureAA.isAssumedNoCapture()) {
5347       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5348       return indicatePessimisticFixpoint();
5349     }
5350 
5351     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5352     if (!NoAliasAA.isAssumedNoAlias()) {
5353       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5354       return indicatePessimisticFixpoint();
5355     }
5356 
5357     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5358     if (!MemBehaviorAA.isAssumedReadOnly()) {
5359       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5360       return indicatePessimisticFixpoint();
5361     }
5362 
5363     return ChangeStatus::UNCHANGED;
5364   }
5365 
5366   /// See AbstractAttribute::trackStatistics()
5367   void trackStatistics() const override {
5368     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5369   }
5370 };
5371 
5372 struct AAPrivatizablePtrCallSiteReturned final
5373     : public AAPrivatizablePtrFloating {
5374   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
5375       : AAPrivatizablePtrFloating(IRP) {}
5376 
5377   /// See AbstractAttribute::initialize(...).
5378   void initialize(Attributor &A) override {
5379     // TODO: We can privatize more than arguments.
5380     indicatePessimisticFixpoint();
5381   }
5382 
5383   /// See AbstractAttribute::trackStatistics()
5384   void trackStatistics() const override {
5385     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5386   }
5387 };
5388 
5389 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5390   AAPrivatizablePtrReturned(const IRPosition &IRP)
5391       : AAPrivatizablePtrFloating(IRP) {}
5392 
5393   /// See AbstractAttribute::initialize(...).
5394   void initialize(Attributor &A) override {
5395     // TODO: We can privatize more than arguments.
5396     indicatePessimisticFixpoint();
5397   }
5398 
5399   /// See AbstractAttribute::trackStatistics()
5400   void trackStatistics() const override {
5401     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5402   }
5403 };
5404 
5405 /// -------------------- Memory Behavior Attributes ----------------------------
5406 /// Includes read-none, read-only, and write-only.
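/// For example (illustrative IR): in
///   define i32 @f(i32* %p) { %v = load i32, i32* %p; ret i32 %v }
/// the argument %p and, absent other accesses, the function @f can be marked
/// 'readonly'.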
5407 /// ----------------------------------------------------------------------------
5408 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5409   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5410 
5411   /// See AbstractAttribute::initialize(...).
5412   void initialize(Attributor &A) override {
5413     intersectAssumedBits(BEST_STATE);
5414     getKnownStateFromValue(getIRPosition(), getState());
5415     IRAttribute::initialize(A);
5416   }
5417 
5418   /// Return the memory behavior information encoded in the IR for \p IRP.
5419   static void getKnownStateFromValue(const IRPosition &IRP,
5420                                      BitIntegerState &State,
5421                                      bool IgnoreSubsumingPositions = false) {
5422     SmallVector<Attribute, 2> Attrs;
5423     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5424     for (const Attribute &Attr : Attrs) {
5425       switch (Attr.getKindAsEnum()) {
5426       case Attribute::ReadNone:
5427         State.addKnownBits(NO_ACCESSES);
5428         break;
5429       case Attribute::ReadOnly:
5430         State.addKnownBits(NO_WRITES);
5431         break;
5432       case Attribute::WriteOnly:
5433         State.addKnownBits(NO_READS);
5434         break;
5435       default:
5436         llvm_unreachable("Unexpected attribute!");
5437       }
5438     }
5439 
5440     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5441       if (!I->mayReadFromMemory())
5442         State.addKnownBits(NO_READS);
5443       if (!I->mayWriteToMemory())
5444         State.addKnownBits(NO_WRITES);
5445     }
5446   }
5447 
5448   /// See AbstractAttribute::getDeducedAttributes(...).
5449   void getDeducedAttributes(LLVMContext &Ctx,
5450                             SmallVectorImpl<Attribute> &Attrs) const override {
5451     assert(Attrs.size() == 0);
5452     if (isAssumedReadNone())
5453       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5454     else if (isAssumedReadOnly())
5455       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5456     else if (isAssumedWriteOnly())
5457       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5458     assert(Attrs.size() <= 1);
5459   }
5460 
5461   /// See AbstractAttribute::manifest(...).
5462   ChangeStatus manifest(Attributor &A) override {
5463     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5464       return ChangeStatus::UNCHANGED;
5465 
5466     const IRPosition &IRP = getIRPosition();
5467 
5468     // Check if we would improve the existing attributes first.
5469     SmallVector<Attribute, 4> DeducedAttrs;
5470     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5471     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5472           return IRP.hasAttr(Attr.getKindAsEnum(),
5473                              /* IgnoreSubsumingPositions */ true);
5474         }))
5475       return ChangeStatus::UNCHANGED;
5476 
5477     // Clear existing attributes.
5478     IRP.removeAttrs(AttrKinds);
5479 
5480     // Use the generic manifest method.
5481     return IRAttribute::manifest(A);
5482   }
5483 
5484   /// See AbstractState::getAsStr().
5485   const std::string getAsStr() const override {
5486     if (isAssumedReadNone())
5487       return "readnone";
5488     if (isAssumedReadOnly())
5489       return "readonly";
5490     if (isAssumedWriteOnly())
5491       return "writeonly";
5492     return "may-read/write";
5493   }
5494 
5495   /// The set of IR attributes AAMemoryBehavior deals with.
5496   static const Attribute::AttrKind AttrKinds[3];
5497 };
5498 
5499 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5500     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
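
// A note on the bit encoding (see AAMemoryBehavior in Attributor.h):
// 'readnone' corresponds to NO_ACCESSES (= NO_READS | NO_WRITES), 'readonly'
// to NO_WRITES, and 'writeonly' to NO_READS. E.g., a pointer that is only ever
// stored through keeps the NO_READS bit and manifests as 'writeonly'.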
5501 
5502 /// Memory behavior attribute for a floating value.
5503 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5504   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5505 
5506   /// See AbstractAttribute::initialize(...).
5507   void initialize(Attributor &A) override {
5508     AAMemoryBehaviorImpl::initialize(A);
5509     // Initialize the use vector with all direct uses of the associated value.
5510     for (const Use &U : getAssociatedValue().uses())
5511       Uses.insert(&U);
5512   }
5513 
5514   /// See AbstractAttribute::updateImpl(...).
5515   ChangeStatus updateImpl(Attributor &A) override;
5516 
5517   /// See AbstractAttribute::trackStatistics()
5518   void trackStatistics() const override {
5519     if (isAssumedReadNone())
5520       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5521     else if (isAssumedReadOnly())
5522       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5523     else if (isAssumedWriteOnly())
5524       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5525   }
5526 
5527 private:
5528   /// Return true if users of \p UserI might access the underlying
5529   /// variable/location described by \p U and should therefore be analyzed.
5530   bool followUsersOfUseIn(Attributor &A, const Use *U,
5531                           const Instruction *UserI);
5532 
5533   /// Update the state according to the effect of use \p U in \p UserI.
5534   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5535 
5536 protected:
5537   /// Container for (transitive) uses of the associated argument.
5538   SetVector<const Use *> Uses;
5539 };
5540 
5541 /// Memory behavior attribute for function argument.
5542 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5543   AAMemoryBehaviorArgument(const IRPosition &IRP)
5544       : AAMemoryBehaviorFloating(IRP) {}
5545 
5546   /// See AbstractAttribute::initialize(...).
5547   void initialize(Attributor &A) override {
5548     intersectAssumedBits(BEST_STATE);
5549     const IRPosition &IRP = getIRPosition();
5550     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5551     // can query it when we use has/getAttr. That would allow us to reuse the
5552     // initialize of the base class here.
5553     bool HasByVal =
5554         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5555     getKnownStateFromValue(IRP, getState(),
5556                            /* IgnoreSubsumingPositions */ HasByVal);
5557 
5559     Argument *Arg = getAssociatedArgument();
5560     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5561       indicatePessimisticFixpoint();
5562     } else {
5563       // Initialize the use vector with all direct uses of the associated value.
5564       for (const Use &U : Arg->uses())
5565         Uses.insert(&U);
5566     }
5567   }
5568 
5569   ChangeStatus manifest(Attributor &A) override {
5570     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5571     if (!getAssociatedValue().getType()->isPointerTy())
5572       return ChangeStatus::UNCHANGED;
5573 
5574     // TODO: From readattrs.ll: "inalloca parameters are always
5575     //                           considered written"
5576     if (hasAttr({Attribute::InAlloca})) {
5577       removeKnownBits(NO_WRITES);
5578       removeAssumedBits(NO_WRITES);
5579     }
5580     return AAMemoryBehaviorFloating::manifest(A);
5581   }
5582 
5583   /// See AbstractAttribute::trackStatistics()
5584   void trackStatistics() const override {
5585     if (isAssumedReadNone())
5586       STATS_DECLTRACK_ARG_ATTR(readnone)
5587     else if (isAssumedReadOnly())
5588       STATS_DECLTRACK_ARG_ATTR(readonly)
5589     else if (isAssumedWriteOnly())
5590       STATS_DECLTRACK_ARG_ATTR(writeonly)
5591   }
5592 };
5593 
5594 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5595   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5596       : AAMemoryBehaviorArgument(IRP) {}
5597 
5598   /// See AbstractAttribute::initialize(...).
5599   void initialize(Attributor &A) override {
5600     if (Argument *Arg = getAssociatedArgument()) {
5601       if (Arg->hasByValAttr()) {
5602         addKnownBits(NO_WRITES);
5603         removeKnownBits(NO_READS);
5604         removeAssumedBits(NO_READS);
5605       }
    }
5608     AAMemoryBehaviorArgument::initialize(A);
5609   }
5610 
5611   /// See AbstractAttribute::updateImpl(...).
5612   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5617     Argument *Arg = getAssociatedArgument();
5618     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5619     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5620     return clampStateAndIndicateChange(
5621         getState(),
5622         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5623   }
5624 
5625   /// See AbstractAttribute::trackStatistics()
5626   void trackStatistics() const override {
5627     if (isAssumedReadNone())
5628       STATS_DECLTRACK_CSARG_ATTR(readnone)
5629     else if (isAssumedReadOnly())
5630       STATS_DECLTRACK_CSARG_ATTR(readonly)
5631     else if (isAssumedWriteOnly())
5632       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5633   }
5634 };
5635 
5636 /// Memory behavior attribute for a call site return position.
5637 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5638   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
5639       : AAMemoryBehaviorFloating(IRP) {}
5640 
5641   /// See AbstractAttribute::manifest(...).
5642   ChangeStatus manifest(Attributor &A) override {
5643     // We do not annotate returned values.
5644     return ChangeStatus::UNCHANGED;
5645   }
5646 
5647   /// See AbstractAttribute::trackStatistics()
5648   void trackStatistics() const override {}
5649 };
5650 
5651 /// An AA to represent the memory behavior function attributes.
5652 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5653   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5654 
5655   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;
5657 
5658   /// See AbstractAttribute::manifest(...).
5659   ChangeStatus manifest(Attributor &A) override {
5660     Function &F = cast<Function>(getAnchorValue());
5661     if (isAssumedReadNone()) {
5662       F.removeFnAttr(Attribute::ArgMemOnly);
5663       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5664       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5665     }
5666     return AAMemoryBehaviorImpl::manifest(A);
5667   }
5668 
5669   /// See AbstractAttribute::trackStatistics()
5670   void trackStatistics() const override {
5671     if (isAssumedReadNone())
5672       STATS_DECLTRACK_FN_ATTR(readnone)
5673     else if (isAssumedReadOnly())
5674       STATS_DECLTRACK_FN_ATTR(readonly)
5675     else if (isAssumedWriteOnly())
5676       STATS_DECLTRACK_FN_ATTR(writeonly)
5677   }
5678 };
5679 
5680 /// AAMemoryBehavior attribute for call sites.
5681 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5682   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5683 
5684   /// See AbstractAttribute::initialize(...).
5685   void initialize(Attributor &A) override {
5686     AAMemoryBehaviorImpl::initialize(A);
5687     Function *F = getAssociatedFunction();
5688     if (!F || !A.isFunctionIPOAmendable(*F))
5689       indicatePessimisticFixpoint();
5690   }
5691 
5692   /// See AbstractAttribute::updateImpl(...).
5693   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
5698     Function *F = getAssociatedFunction();
5699     const IRPosition &FnPos = IRPosition::function(*F);
5700     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5701     return clampStateAndIndicateChange(
5702         getState(),
5703         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5704   }
5705 
5706   /// See AbstractAttribute::trackStatistics()
5707   void trackStatistics() const override {
5708     if (isAssumedReadNone())
5709       STATS_DECLTRACK_CS_ATTR(readnone)
5710     else if (isAssumedReadOnly())
5711       STATS_DECLTRACK_CS_ATTR(readonly)
5712     else if (isAssumedWriteOnly())
5713       STATS_DECLTRACK_CS_ATTR(writeonly)
5714   }
5715 };
5716 
5717 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5718 
5719   // The current assumed state used to determine a change.
5720   auto AssumedState = getAssumed();
5721 
5722   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
5724     // the local state. No further analysis is required as the other memory
5725     // state is as optimistic as it gets.
5726     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5727       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5728           *this, IRPosition::callsite_function(*CB));
5729       intersectAssumedBits(MemBehaviorAA.getAssumed());
5730       return !isAtFixpoint();
5731     }
5732 
5733     // Remove access kind modifiers if necessary.
5734     if (I.mayReadFromMemory())
5735       removeAssumedBits(NO_READS);
5736     if (I.mayWriteToMemory())
5737       removeAssumedBits(NO_WRITES);
5738     return !isAtFixpoint();
5739   };
5740 
5741   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5742     return indicatePessimisticFixpoint();
5743 
5744   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5745                                         : ChangeStatus::UNCHANGED;
5746 }
5747 
5748 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5749 
5750   const IRPosition &IRP = getIRPosition();
5751   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5752   AAMemoryBehavior::StateType &S = getState();
5753 
  // First, check the function scope. We take the known information and avoid
  // work if the function's assumed information already implies the current
  // assumed information for this attribute. This is valid for all but byval
  // arguments.
5757   Argument *Arg = IRP.getAssociatedArgument();
5758   AAMemoryBehavior::base_t FnMemAssumedState =
5759       AAMemoryBehavior::StateType::getWorstState();
5760   if (!Arg || !Arg->hasByValAttr()) {
5761     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5762         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5763     FnMemAssumedState = FnMemAA.getAssumed();
5764     S.addKnownBits(FnMemAA.getKnown());
5765     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5766       return ChangeStatus::UNCHANGED;
5767   }
5768 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
5773   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5774       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5775   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5776     S.intersectAssumedBits(FnMemAssumedState);
5777     return ChangeStatus::CHANGED;
5778   }
5779 
5780   // The current assumed state used to determine a change.
5781   auto AssumedState = S.getAssumed();
5782 
5783   // Liveness information to exclude dead users.
5784   // TODO: Take the FnPos once we have call site specific liveness information.
5785   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5786       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5787       /* TrackDependence */ false);
5788 
5789   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5790   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5791     const Use *U = Uses[i];
5792     Instruction *UserI = cast<Instruction>(U->getUser());
5793     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5794                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5795                       << "]\n");
5796     if (A.isAssumedDead(*U, this, &LivenessAA))
5797       continue;
5798 
    // Droppable users, e.g., llvm::assume, do not actually perform any action.
5800     if (UserI->isDroppable())
5801       continue;
5802 
5803     // Check if the users of UserI should also be visited.
5804     if (followUsersOfUseIn(A, U, UserI))
5805       for (const Use &UserIUse : UserI->uses())
5806         Uses.insert(&UserIUse);
5807 
5808     // If UserI might touch memory we analyze the use in detail.
5809     if (UserI->mayReadOrWriteMemory())
5810       analyzeUseIn(A, U, UserI);
5811   }
5812 
5813   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5814                                         : ChangeStatus::UNCHANGED;
5815 }
5816 
5817 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5818                                                   const Instruction *UserI) {
5819   // The loaded value is unrelated to the pointer argument, no need to
5820   // follow the users of the load.
5821   if (isa<LoadInst>(UserI))
5822     return false;
5823 
  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
5826   const auto *CB = dyn_cast<CallBase>(UserI);
5827   if (!CB || !CB->isArgOperand(U))
5828     return true;
5829 
5830   // If the use is a call argument known not to be captured, the users of
5831   // the call do not need to be visited because they have to be unrelated to
5832   // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might return the argument "through return", which we allow and for
  // which we need to check call users.
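  //
  // For example (an illustrative sketch; the callee name is made up):
  //   %q = call i8* @identity(i8* %p) ; may return %p "through return"
  //   store i8 0, i8* %q              ; a write that must be attributed to %p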
5836   if (U->get()->getType()->isPointerTy()) {
5837     unsigned ArgNo = CB->getArgOperandNo(U);
5838     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5839         *this, IRPosition::callsite_argument(*CB, ArgNo),
5840         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5841     return !ArgNoCaptureAA.isAssumedNoCapture();
5842   }
5843 
5844   return true;
5845 }
5846 
5847 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5848                                             const Instruction *UserI) {
5849   assert(UserI->mayReadOrWriteMemory());
5850 
5851   switch (UserI->getOpcode()) {
5852   default:
5853     // TODO: Handle all atomics and other side-effect operations we know of.
5854     break;
5855   case Instruction::Load:
5856     // Loads cause the NO_READS property to disappear.
5857     removeAssumedBits(NO_READS);
5858     return;
5859 
5860   case Instruction::Store:
5861     // Stores cause the NO_WRITES property to disappear if the use is the
5862     // pointer operand. Note that we do assume that capturing was taken care of
5863     // somewhere else.
5864     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5865       removeAssumedBits(NO_WRITES);
5866     return;
5867 
5868   case Instruction::Call:
5869   case Instruction::CallBr:
5870   case Instruction::Invoke: {
5871     // For call sites we look at the argument memory behavior attribute (this
5872     // could be recursive!) in order to restrict our own state.
5873     const auto *CB = cast<CallBase>(UserI);
5874 
5875     // Give up on operand bundles.
5876     if (CB->isBundleOperand(U)) {
5877       indicatePessimisticFixpoint();
5878       return;
5879     }
5880 
    // Calling a function does read the function pointer, and may even write it
    // if the function is self-modifying.
5883     if (CB->isCallee(U)) {
5884       removeAssumedBits(NO_READS);
5885       break;
5886     }
5887 
5888     // Adjust the possible access behavior based on the information on the
5889     // argument.
5890     IRPosition Pos;
5891     if (U->get()->getType()->isPointerTy())
5892       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5893     else
5894       Pos = IRPosition::callsite_function(*CB);
5895     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5896         *this, Pos,
5897         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5898     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5899     // and at least "known".
5900     intersectAssumedBits(MemBehaviorAA.getAssumed());
5901     return;
5902   }
5903   };
5904 
5905   // Generally, look at the "may-properties" and adjust the assumed state if we
5906   // did not trigger special handling before.
5907   if (UserI->mayReadFromMemory())
5908     removeAssumedBits(NO_READS);
5909   if (UserI->mayWriteToMemory())
5910     removeAssumedBits(NO_WRITES);
5911 }
5912 
5913 } // namespace
5914 
5915 /// -------------------- Memory Locations Attributes ---------------------------
5916 /// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblemem_or_argmemonly.
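///
/// For example (illustrative IR): a function that only accesses memory through
/// its pointer arguments, e.g.,
///   define void @f(i32* %p) { store i32 7, i32* %p; ret void }
/// can be marked 'argmemonly'.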
5918 /// ----------------------------------------------------------------------------
5919 
5920 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5921     AAMemoryLocation::MemoryLocationsKind MLK) {
5922   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5923     return "all memory";
5924   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5925     return "no memory";
5926   std::string S = "memory:";
5927   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5928     S += "stack,";
5929   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5930     S += "constant,";
5931   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5932     S += "internal global,";
5933   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5934     S += "external global,";
5935   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5936     S += "argument,";
5937   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5938     S += "inaccessible,";
5939   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5940     S += "malloced,";
5941   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
5942     S += "unknown,";
5943   S.pop_back();
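  // E.g., if only the NO_ARGUMENT_MEM bit is clear, the result is
  // "memory:argument".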
5944   return S;
5945 }
5946 
5947 struct AAMemoryLocationImpl : public AAMemoryLocation {
5948 
5949   AAMemoryLocationImpl(const IRPosition &IRP) : AAMemoryLocation(IRP) {}
5950 
5951   /// See AbstractAttribute::initialize(...).
5952   void initialize(Attributor &A) override {
5953     intersectAssumedBits(BEST_STATE);
5954     getKnownStateFromValue(getIRPosition(), getState());
5955     IRAttribute::initialize(A);
5956   }
5957 
5958   /// Return the memory behavior information encoded in the IR for \p IRP.
5959   static void getKnownStateFromValue(const IRPosition &IRP,
5960                                      BitIntegerState &State,
5961                                      bool IgnoreSubsumingPositions = false) {
5962     SmallVector<Attribute, 2> Attrs;
5963     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5964     for (const Attribute &Attr : Attrs) {
5965       switch (Attr.getKindAsEnum()) {
5966       case Attribute::ReadNone:
5967         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
5968         break;
5969       case Attribute::InaccessibleMemOnly:
5970         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
5971         break;
5972       case Attribute::ArgMemOnly:
5973         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
5974         break;
5975       case Attribute::InaccessibleMemOrArgMemOnly:
5976         State.addKnownBits(
5977             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
5978         break;
5979       default:
5980         llvm_unreachable("Unexpected attribute!");
5981       }
5982     }
5983   }
5984 
5985   /// See AbstractAttribute::getDeducedAttributes(...).
5986   void getDeducedAttributes(LLVMContext &Ctx,
5987                             SmallVectorImpl<Attribute> &Attrs) const override {
5988     assert(Attrs.size() == 0);
5989     if (isAssumedReadNone()) {
5990       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5991     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
5992       if (isAssumedInaccessibleMemOnly())
5993         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
5994       else if (isAssumedArgMemOnly())
5995         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
5996       else if (isAssumedInaccessibleOrArgMemOnly())
5997         Attrs.push_back(
5998             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
5999     }
6000     assert(Attrs.size() <= 1);
6001   }
6002 
6003   /// See AbstractAttribute::manifest(...).
6004   ChangeStatus manifest(Attributor &A) override {
6005     const IRPosition &IRP = getIRPosition();
6006 
6007     // Check if we would improve the existing attributes first.
6008     SmallVector<Attribute, 4> DeducedAttrs;
6009     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6010     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6011           return IRP.hasAttr(Attr.getKindAsEnum(),
6012                              /* IgnoreSubsumingPositions */ true);
6013         }))
6014       return ChangeStatus::UNCHANGED;
6015 
6016     // Clear existing attributes.
6017     IRP.removeAttrs(AttrKinds);
6018     if (isAssumedReadNone())
6019       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6020 
6021     // Use the generic manifest method.
6022     return IRAttribute::manifest(A);
6023   }
6024 
6025   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6026   bool checkForAllAccessesToMemoryKind(
6027       function_ref<bool(const Instruction *, const Value *, AccessKind,
6028                         MemoryLocationsKind)>
6029           Pred,
6030       MemoryLocationsKind RequestedMLK) const override {
6031     if (!isValidState())
6032       return false;
6033 
6034     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6035     if (AssumedMLK == NO_LOCATIONS)
6036       return true;
6037 
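    // A bit set in \p RequestedMLK excludes that single location kind from the
    // check; for each remaining kind we run \p Pred on all recorded accesses.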
6038     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6039       if (CurMLK & RequestedMLK)
6040         continue;
6041 
6042       const auto &Accesses = AccessKindAccessesMap.lookup(CurMLK);
6043       for (const AccessInfo &AI : Accesses) {
6044         if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6045           return false;
6046       }
6047     }
6048 
6049     return true;
6050   }
6051 
6052   ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint, this instruction will
    // become an access for all potential access kinds.
6055     // TODO: Add pointers for argmemonly and globals to improve the results of
6056     //       checkForAllAccessesToMemoryKind.
6057     bool Changed = false;
6058     MemoryLocationsKind KnownMLK = getKnown();
6059     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6060     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6061       if (!(CurMLK & KnownMLK))
6062         updateStateAndAccessesMap(getState(), AccessKindAccessesMap, CurMLK, I,
6063                                   nullptr, Changed);
6064     return AAMemoryLocation::indicatePessimisticFixpoint();
6065   }
6066 
6067 protected:
6068   /// Helper struct to tie together an instruction that has a read or write
6069   /// effect with the pointer it accesses (if any).
6070   struct AccessInfo {
6071 
6072     /// The instruction that caused the access.
6073     const Instruction *I;
6074 
6075     /// The base pointer that is accessed, or null if unknown.
6076     const Value *Ptr;
6077 
6078     /// The kind of access (read/write/read+write).
6079     AccessKind Kind;
6080 
6081     bool operator==(const AccessInfo &RHS) const {
6082       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6083     }
6084     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6085       if (LHS.I != RHS.I)
6086         return LHS.I < RHS.I;
6087       if (LHS.Ptr != RHS.Ptr)
6088         return LHS.Ptr < RHS.Ptr;
6089       if (LHS.Kind != RHS.Kind)
6090         return LHS.Kind < RHS.Kind;
6091       return false;
6092     }
6093   };
6094 
  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM (encoded by
  /// the bit value of NO_LOCAL_MEM), to the accesses encountered for that
  /// memory kind.
6097   using AccessKindAccessesMapTy =
6098       DenseMap<unsigned, SmallSet<AccessInfo, 8, AccessInfo>>;
6099   AccessKindAccessesMapTy AccessKindAccessesMap;
6100 
6101   /// Return the kind(s) of location that may be accessed by \p V.
6102   AAMemoryLocation::MemoryLocationsKind
6103   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6104 
6105   /// Update the state \p State and the AccessKindAccessesMap given that \p I is
6106   /// an access to a \p MLK memory location with the access pointer \p Ptr.
6107   static void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6108                                         AccessKindAccessesMapTy &AccessMap,
6109                                         MemoryLocationsKind MLK,
6110                                         const Instruction *I, const Value *Ptr,
6111                                         bool &Changed) {
6112     // TODO: The kind should be determined at the call sites based on the
6113     // information we have there.
6114     AccessKind Kind = READ_WRITE;
6115     if (I) {
6116       Kind = I->mayReadFromMemory() ? READ : NONE;
6117       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6118     }
6119 
6120     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6121     Changed |= AccessMap[MLK].insert(AccessInfo{I, Ptr, Kind}).second;
6122     State.removeAssumedBits(MLK);
6123   }
6124 
6125   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6126   /// arguments, and update the state and access map accordingly.
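  /// For example, a pointer based on an alloca is categorized as local (stack)
  /// memory, one based on an internal global as internal global memory, and an
  /// unrecognized base as unknown memory.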
6127   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6128                           AAMemoryLocation::StateType &State, bool &Changed);
6129 
6130   /// The set of IR attributes AAMemoryLocation deals with.
6131   static const Attribute::AttrKind AttrKinds[4];
6132 };
6133 
6134 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6135     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6136     Attribute::InaccessibleMemOrArgMemOnly};
6137 
6138 void AAMemoryLocationImpl::categorizePtrValue(
6139     Attributor &A, const Instruction &I, const Value &Ptr,
6140     AAMemoryLocation::StateType &State, bool &Changed) {
6141   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6142                     << Ptr << " ["
6143                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6144 
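  // Strip (possibly nested) GEPs down to their base pointer; the offsets do
  // not change the kind of the underlying memory location.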
6145   auto StripGEPCB = [](Value *V) -> Value * {
6146     auto *GEP = dyn_cast<GEPOperator>(V);
6147     while (GEP) {
6148       V = GEP->getPointerOperand();
6149       GEP = dyn_cast<GEPOperator>(V);
6150     }
6151     return V;
6152   };
6153 
6154   auto VisitValueCB = [&](Value &V, const Instruction *,
6155                           AAMemoryLocation::StateType &T,
6156                           bool Stripped) -> bool {
6157     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6158     if (isa<UndefValue>(V))
6159       return true;
6160     if (auto *Arg = dyn_cast<Argument>(&V)) {
6161       if (Arg->hasByValAttr())
6162         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I,
6163                                   &V, Changed);
6164       else
6165         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_ARGUMENT_MEM, &I,
6166                                   &V, Changed);
6167       return true;
6168     }
6169     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6170       if (GV->hasLocalLinkage())
6171         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6172                                   NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6173       else
6174         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6175                                   NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6176       return true;
6177     }
6178     if (isa<AllocaInst>(V)) {
6179       updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I, &V,
6180                                 Changed);
6181       return true;
6182     }
6183     if (const auto *CB = dyn_cast<CallBase>(&V)) {
6184       const auto &NoAliasAA =
6185           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6186       if (NoAliasAA.isAssumedNoAlias()) {
6187         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_MALLOCED_MEM, &I,
6188                                   &V, Changed);
6189         return true;
6190       }
6191     }
6192 
6193     updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_UNKOWN_MEM, &I, &V,
6194                               Changed);
6195     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6196                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6197                       << "\n");
6198     return true;
6199   };
6200 
6201   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6202           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6203           /* MaxValues */ 32, StripGEPCB)) {
6204     LLVM_DEBUG(
6205         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6206     updateStateAndAccessesMap(State, AccessKindAccessesMap, NO_UNKOWN_MEM, &I,
6207                               nullptr, Changed);
6208   } else {
6209     LLVM_DEBUG(
6210         dbgs()
6211         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6212         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6213   }
6214 }
6215 
6216 AAMemoryLocation::MemoryLocationsKind
6217 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6218                                                   bool &Changed) {
6219   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6220                     << I << "\n");
6221 
6222   AAMemoryLocation::StateType AccessedLocs;
6223   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6224 
6225   if (auto *CB = dyn_cast<CallBase>(&I)) {
6226 
    // First check if we assume any memory is accessed at all.
6228     const auto &CBMemLocationAA =
6229         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6230     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6231                       << " [" << CBMemLocationAA << "]\n");
6232 
6233     if (CBMemLocationAA.isAssumedReadNone())
6234       return NO_LOCATIONS;
6235 
6236     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6237       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap,
6238                                 NO_INACCESSIBLE_MEM, &I, nullptr, Changed);
6239       return AccessedLocs.getAssumed();
6240     }
6241 
6242     uint32_t CBAssumedNotAccessedLocs =
6243         CBMemLocationAA.getAssumedNotAccessedLocation();
6244 
    // Set the argmemonly and global bits as we handle them separately below.
6246     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6247         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6248 
6249     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6250       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6251         continue;
6252       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, CurMLK, &I,
6253                                 nullptr, Changed);
6254     }
6255 
6256     // Now handle global memory if it might be accessed. This is slightly tricky
6257     // as NO_GLOBAL_MEM has multiple bits set.
6258     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6259     if (HasGlobalAccesses) {
6260       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6261                             AccessKind Kind, MemoryLocationsKind MLK) {
6262         updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, MLK, &I,
6263                                   Ptr, Changed);
6264         return true;
6265       };
6266       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6267               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6268         return AccessedLocs.getWorstState();
6269     }
6270 
6271     LLVM_DEBUG(
6272         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6273                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6274 
6275     // Now handle argument memory if it might be accessed.
6276     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6277     if (HasArgAccesses) {
6278       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6279            ++ArgNo) {
6280 
6281         // Skip non-pointer arguments.
6282         const Value *ArgOp = CB->getArgOperand(ArgNo);
6283         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6284           continue;
6285 
6286         // Skip readnone arguments.
6287         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6288         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6289             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6290 
6291         if (ArgOpMemLocationAA.isAssumedReadNone())
6292           continue;
6293 
        // Categorize potentially accessed pointer arguments as if there was an
        // access instruction with them as the pointer operand.
6296         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6297       }
6298     }
6299 
6300     LLVM_DEBUG(
6301         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6302                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6303 
6304     return AccessedLocs.getAssumed();
6305   }
6306 
6307   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6308     LLVM_DEBUG(
6309         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6310                << I << " [" << *Ptr << "]\n");
6311     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6312     return AccessedLocs.getAssumed();
6313   }
6314 
6315   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6316                     << I << "\n");
6317   updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, NO_UNKOWN_MEM,
6318                             &I, nullptr, Changed);
6319   return AccessedLocs.getAssumed();
6320 }
6321 
6322 /// An AA to represent the memory behavior function attributes.
6323 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6324   AAMemoryLocationFunction(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6325 
6326   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
6328 
6329     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6330         *this, getIRPosition(), /* TrackDependence */ false);
6331     if (MemBehaviorAA.isAssumedReadNone()) {
6332       if (MemBehaviorAA.isKnownReadNone())
6333         return indicateOptimisticFixpoint();
6334       assert(isAssumedReadNone() &&
6335              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6336       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6337       return ChangeStatus::UNCHANGED;
6338     }
6339 
6340     // The current assumed state used to determine a change.
6341     auto AssumedState = getAssumed();
6342     bool Changed = false;
6343 
6344     auto CheckRWInst = [&](Instruction &I) {
6345       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6346       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6347                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6348       removeAssumedBits(inverseLocation(MLK, false, false));
6349       return true;
6350     };
6351 
6352     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6353       return indicatePessimisticFixpoint();
6354 
6355     Changed |= AssumedState != getAssumed();
6356     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6357   }
6358 
6359   /// See AbstractAttribute::trackStatistics()
6360   void trackStatistics() const override {
6361     if (isAssumedReadNone())
6362       STATS_DECLTRACK_FN_ATTR(readnone)
6363     else if (isAssumedArgMemOnly())
6364       STATS_DECLTRACK_FN_ATTR(argmemonly)
6365     else if (isAssumedInaccessibleMemOnly())
6366       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6367     else if (isAssumedInaccessibleOrArgMemOnly())
6368       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6369   }
6370 };
6371 
6372 /// AAMemoryLocation attribute for call sites.
6373 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6374   AAMemoryLocationCallSite(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6375 
6376   /// See AbstractAttribute::initialize(...).
6377   void initialize(Attributor &A) override {
6378     AAMemoryLocationImpl::initialize(A);
6379     Function *F = getAssociatedFunction();
6380     if (!F || !A.isFunctionIPOAmendable(*F))
6381       indicatePessimisticFixpoint();
6382   }
6383 
6384   /// See AbstractAttribute::updateImpl(...).
6385   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6390     Function *F = getAssociatedFunction();
6391     const IRPosition &FnPos = IRPosition::function(*F);
6392     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6393     bool Changed = false;
6394     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6395                           AccessKind Kind, MemoryLocationsKind MLK) {
6396       updateStateAndAccessesMap(getState(), AccessKindAccessesMap, MLK, I, Ptr,
6397                                 Changed);
6398       return true;
6399     };
6400     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6401       return indicatePessimisticFixpoint();
6402     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6403   }
6404 
6405   /// See AbstractAttribute::trackStatistics()
6406   void trackStatistics() const override {
6407     if (isAssumedReadNone())
6408       STATS_DECLTRACK_CS_ATTR(readnone)
6409   }
6410 };
6411 
6412 /// ------------------ Value Constant Range Attribute -------------------------
6413 
6414 struct AAValueConstantRangeImpl : AAValueConstantRange {
6415   using StateType = IntegerRangeState;
6416   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
6417 
6418   /// See AbstractAttribute::getAsStr().
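  /// E.g., a 32-bit value with no known bounds but an assumed range of
  /// [0, 42) prints as "range(32)<full-set / [0,42)>".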
6419   const std::string getAsStr() const override {
6420     std::string Str;
6421     llvm::raw_string_ostream OS(Str);
6422     OS << "range(" << getBitWidth() << ")<";
6423     getKnown().print(OS);
6424     OS << " / ";
6425     getAssumed().print(OS);
6426     OS << ">";
6427     return OS.str();
6428   }
6429 
6430   /// Helper function to get a SCEV expr for the associated value at program
6431   /// point \p I.
6432   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6433     if (!getAnchorScope())
6434       return nullptr;
6435 
6436     ScalarEvolution *SE =
6437         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6438             *getAnchorScope());
6439 
6440     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6441         *getAnchorScope());
6442 
6443     if (!SE || !LI)
6444       return nullptr;
6445 
6446     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6447     if (!I)
6448       return S;
6449 
6450     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6451   }
6452 
6453   /// Helper function to get a range from SCEV for the associated value at
6454   /// program point \p I.
6455   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6456                                          const Instruction *I = nullptr) const {
6457     if (!getAnchorScope())
6458       return getWorstState(getBitWidth());
6459 
6460     ScalarEvolution *SE =
6461         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6462             *getAnchorScope());
6463 
6464     const SCEV *S = getSCEV(A, I);
6465     if (!SE || !S)
6466       return getWorstState(getBitWidth());
6467 
6468     return SE->getUnsignedRange(S);
6469   }
6470 
6471   /// Helper function to get a range from LVI for the associated value at
6472   /// program point \p I.
6473   ConstantRange
6474   getConstantRangeFromLVI(Attributor &A,
6475                           const Instruction *CtxI = nullptr) const {
6476     if (!getAnchorScope())
6477       return getWorstState(getBitWidth());
6478 
6479     LazyValueInfo *LVI =
6480         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6481             *getAnchorScope());
6482 
6483     if (!LVI || !CtxI)
6484       return getWorstState(getBitWidth());
6485     return LVI->getConstantRange(&getAssociatedValue(),
6486                                  const_cast<BasicBlock *>(CtxI->getParent()),
6487                                  const_cast<Instruction *>(CtxI));
6488   }
6489 
6490   /// See AAValueConstantRange::getKnownConstantRange(..).
6491   ConstantRange
6492   getKnownConstantRange(Attributor &A,
6493                         const Instruction *CtxI = nullptr) const override {
6494     if (!CtxI || CtxI == getCtxI())
6495       return getKnown();
6496 
6497     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6498     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6499     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6500   }
6501 
6502   /// See AAValueConstantRange::getAssumedConstantRange(..).
6503   ConstantRange
6504   getAssumedConstantRange(Attributor &A,
6505                           const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor, e.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
6510 
6511     if (!CtxI || CtxI == getCtxI())
6512       return getAssumed();
6513 
6514     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6515     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6516     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6517   }
6518 
6519   /// See AbstractAttribute::initialize(..).
6520   void initialize(Attributor &A) override {
6521     // Intersect a range given by SCEV.
6522     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6523 
6524     // Intersect a range given by LVI.
6525     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6526   }
6527 
6528   /// Helper function to create MDNode for range metadata.
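  /// Range metadata encodes half-open intervals, so, e.g., an assumed range
  /// [0, 42) on an i32 becomes the equivalent of !{i32 0, i32 42}.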
6529   static MDNode *
6530   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6531                             const ConstantRange &AssumedConstantRange) {
6532     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6533                                   Ty, AssumedConstantRange.getLower())),
6534                               ConstantAsMetadata::get(ConstantInt::get(
6535                                   Ty, AssumedConstantRange.getUpper()))};
6536     return MDNode::get(Ctx, LowAndHigh);
6537   }
6538 
6539   /// Return true if \p Assumed is included in \p KnownRanges.
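  /// E.g., an existing !range !{i32 0, i32 100} is improved by an assumed
  /// range [10, 20) (strictly contained), but not by [0, 100) itself or by a
  /// full set.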
6540   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6541 
6542     if (Assumed.isFullSet())
6543       return false;
6544 
6545     if (!KnownRanges)
6546       return true;
6547 
    // If multiple ranges are annotated in the IR, we give up on annotating the
    // assumed range for now.

    // TODO: If there exists a known range that contains the assumed range, we
    // can say the assumed range is better.
6553     if (KnownRanges->getNumOperands() > 2)
6554       return false;
6555 
6556     ConstantInt *Lower =
6557         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6558     ConstantInt *Upper =
6559         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6560 
6561     ConstantRange Known(Lower->getValue(), Upper->getValue());
6562     return Known.contains(Assumed) && Known != Assumed;
6563   }
6564 
6565   /// Helper function to set range metadata.
6566   static bool
6567   setRangeMetadataIfisBetterRange(Instruction *I,
6568                                   const ConstantRange &AssumedConstantRange) {
6569     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6570     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6571       if (!AssumedConstantRange.isEmptySet()) {
6572         I->setMetadata(LLVMContext::MD_range,
6573                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6574                                                  AssumedConstantRange));
6575         return true;
6576       }
6577     }
6578     return false;
6579   }
6580 
6581   /// See AbstractAttribute::manifest()
6582   ChangeStatus manifest(Attributor &A) override {
6583     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6584     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6585     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6586 
6587     auto &V = getAssociatedValue();
6588     if (!AssumedConstantRange.isEmptySet() &&
6589         !AssumedConstantRange.isSingleElement()) {
6590       if (Instruction *I = dyn_cast<Instruction>(&V))
6591         if (isa<CallInst>(I) || isa<LoadInst>(I))
6592           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6593             Changed = ChangeStatus::CHANGED;
6594     }
6595 
6596     return Changed;
6597   }
6598 };
6599 
6600 struct AAValueConstantRangeArgument final
6601     : AAArgumentFromCallSiteArguments<
6602           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6603   using Base = AAArgumentFromCallSiteArguments<
6604       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6605   AAValueConstantRangeArgument(const IRPosition &IRP) : Base(IRP) {}
6606 
6607   /// See AbstractAttribute::initialize(..).
6608   void initialize(Attributor &A) override {
6609     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6610       indicatePessimisticFixpoint();
6611     } else {
6612       Base::initialize(A);
6613     }
6614   }
6615 
6616   /// See AbstractAttribute::trackStatistics()
6617   void trackStatistics() const override {
6618     STATS_DECLTRACK_ARG_ATTR(value_range)
6619   }
6620 };
6621 
6622 struct AAValueConstantRangeReturned
6623     : AAReturnedFromReturnedValues<AAValueConstantRange,
6624                                    AAValueConstantRangeImpl> {
6625   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6626                                             AAValueConstantRangeImpl>;
6627   AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}
6628 
6629   /// See AbstractAttribute::initialize(...).
6630   void initialize(Attributor &A) override {}
6631 
6632   /// See AbstractAttribute::trackStatistics()
6633   void trackStatistics() const override {
6634     STATS_DECLTRACK_FNRET_ATTR(value_range)
6635   }
6636 };

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP)
      : AAValueConstantRangeImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0. Undef may take any value, so assuming
      // the singleton 0 is a sound (and arbitrary) choice.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    // Binary operators, compares, and casts are handled in updateImpl as we
    // traverse their operands.
    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  bool calculateBinaryOperator(
      Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    // TODO: Allow non-integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QueriedAAs.push_back(&LHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QueriedAAs.push_back(&RHSAA);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

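    // The result range conservatively covers every possible result of the
    // operation, e.g., for an 'add', [0, 4) + [1, 2) yields [1, 5).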
    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }

  bool calculateCastInst(
      Attributor &A, CastInst *CastI, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non-integers as well.
    Value &OpV = *CastI->getOperand(0);
    if (!OpV.getType()->isIntegerTy())
      return false;

    auto &OpAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
    QueriedAAs.push_back(&OpAA);
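    // Cast the operand's assumed range to the bit width of this position,
    // e.g., a 'zext' of an i8 value in [10, 20) yields an i32 value in
    // [10, 20).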
    T.unionAssumed(
        OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
    return T.isValidState();
  }

  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   const Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);
    // TODO: Allow non-integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QueriedAAs.push_back(&LHSAA);
    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QueriedAAs.push_back(&RHSAA);

    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of the operand ranges is the empty set, we cannot deduce
    // anything, so keep the current state.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    // The allowed region contains all LHS values for which the predicate may
    // hold for some RHS value; the satisfying region contains those for which
    // it holds for all RHS values.
    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
        CmpI->getPredicate(), RHSAARange);
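    // For example, for 'icmp ult' with an RHS known to be exactly 8, both
    // regions are [0, 8): an LHS range of [0, 4) is contained in the
    // satisfying region, so the compare must be true, while an LHS range of
    // [10, 12) does not intersect the allowed region, so it must be false.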

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    if (SatisfyingRegion.contains(LHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "MustTrue and MustFalse cannot both be set!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      // The compare may be either true or false; the full 1-bit range encodes
      // the unknown boolean result.
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {
        // If the value is not an instruction, or it is a call site, we query
        // the Attributor for the range AA of the value directly.
        const auto &AA =
            A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));

        // Union the range assumed at the program point CtxI; the clamp
        // operator is not used as it could not take CtxI into account.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
          return false;
      } else {
        // Give up on all other instructions.
        // TODO: Add handling for more instructions.
        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QueriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};

struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
  AAValueConstantRangeFunction(const IRPosition &IRP)
      : AAValueConstantRangeImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
};

struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
  AAValueConstantRangeCallSite(const IRPosition &IRP)
      : AAValueConstantRangeFunction(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
};

struct AAValueConstantRangeCallSiteReturned
    : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                     AAValueConstantRangeImpl> {
  AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                       AAValueConstantRangeImpl>(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
    if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
      if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));

    AAValueConstantRangeImpl::initialize(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
      : AAValueConstantRangeFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
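//
// SWITCH_PK_INV emits a switch case that aborts for a position kind the
// attribute cannot be created for, while SWITCH_PK_CREATE emits a case that
// allocates the CLASS##SUFFIX specialization for that position kind.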

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP);                                 \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
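
// As an example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// defines AANoUnwind::createForPosition, which allocates an AANoUnwindFunction
// or AANoUnwindCallSite for function and call site positions, respectively,
// and aborts for all other position kinds.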

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV