1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/CaptureTracking.h"
19 #include "llvm/Analysis/LazyValueInfo.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/ScalarEvolution.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/IRBuilder.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/NoFolder.h"
26 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
27 #include "llvm/Transforms/Utils/Local.h"
28 
29 #include <cassert>
30 
31 using namespace llvm;
32 
33 #define DEBUG_TYPE "attributor"
34 
35 static cl::opt<bool> ManifestInternal(
36     "attributor-manifest-internal", cl::Hidden,
37     cl::desc("Manifest Attributor internal string attributes."),
38     cl::init(false));
39 
40 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
41                                        cl::Hidden);
42 
43 STATISTIC(NumAAs, "Number of abstract attributes created");
44 
45 // Some helper macros to deal with statistics tracking.
46 //
47 // Usage:
48 // For simple IR attribute tracking overload trackStatistics in the abstract
49 // attribute and choose the right STATS_DECLTRACK_********* macro,
50 // e.g.,:
51 //  void trackStatistics() const override {
52 //    STATS_DECLTRACK_ARG_ATTR(returned)
53 //  }
54 // If there is a single "increment" side one can use the macro
55 // STATS_DECLTRACK with a custom message. If there are multiple increment
56 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
57 //
58 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
59   ("Number of " #TYPE " marked '" #NAME "'")
60 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
61 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
62 #define STATS_DECL(NAME, TYPE, MSG)                                            \
63   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
64 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
65 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
66   {                                                                            \
67     STATS_DECL(NAME, TYPE, MSG)                                                \
68     STATS_TRACK(NAME, TYPE)                                                    \
69   }
70 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
71   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
72 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
73   STATS_DECLTRACK(NAME, CSArguments,                                           \
74                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
75 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
76   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
77 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
78   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
79 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
80   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
81                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
82 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
83   STATS_DECLTRACK(NAME, CSReturn,                                              \
84                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
85 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
86   STATS_DECLTRACK(NAME, Floating,                                              \
87                   ("Number of floating values known to be '" #NAME "'"))
88 
89 // Specialization of the operator<< for abstract attributes subclasses. This
90 // disambiguates situations where multiple operators are applicable.
91 namespace llvm {
92 #define PIPE_OPERATOR(CLASS)                                                   \
93   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
94     return OS << static_cast<const AbstractAttribute &>(AA);                   \
95   }
96 
97 PIPE_OPERATOR(AAIsDead)
98 PIPE_OPERATOR(AANoUnwind)
99 PIPE_OPERATOR(AANoSync)
100 PIPE_OPERATOR(AANoRecurse)
101 PIPE_OPERATOR(AAWillReturn)
102 PIPE_OPERATOR(AANoReturn)
103 PIPE_OPERATOR(AAReturnedValues)
104 PIPE_OPERATOR(AANonNull)
105 PIPE_OPERATOR(AANoAlias)
106 PIPE_OPERATOR(AADereferenceable)
107 PIPE_OPERATOR(AAAlign)
108 PIPE_OPERATOR(AANoCapture)
109 PIPE_OPERATOR(AAValueSimplify)
110 PIPE_OPERATOR(AANoFree)
111 PIPE_OPERATOR(AAHeapToStack)
112 PIPE_OPERATOR(AAReachability)
113 PIPE_OPERATOR(AAMemoryBehavior)
114 PIPE_OPERATOR(AAMemoryLocation)
115 PIPE_OPERATOR(AAValueConstantRange)
116 PIPE_OPERATOR(AAPrivatizablePtr)
117 
118 #undef PIPE_OPERATOR
119 } // namespace llvm
120 
121 namespace {
122 
123 static Optional<ConstantInt *>
124 getAssumedConstantInt(Attributor &A, const Value &V,
125                       const AbstractAttribute &AA,
126                       bool &UsedAssumedInformation) {
127   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
128   if (C.hasValue())
129     return dyn_cast_or_null<ConstantInt>(C.getValue());
130   return llvm::None;
131 }
132 
133 /// Get pointer operand of memory accessing instruction. If \p I is
134 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
135 /// is set to false and the instruction is volatile, return nullptr.
136 static const Value *getPointerOperand(const Instruction *I,
137                                       bool AllowVolatile) {
138   if (auto *LI = dyn_cast<LoadInst>(I)) {
139     if (!AllowVolatile && LI->isVolatile())
140       return nullptr;
141     return LI->getPointerOperand();
142   }
143 
144   if (auto *SI = dyn_cast<StoreInst>(I)) {
145     if (!AllowVolatile && SI->isVolatile())
146       return nullptr;
147     return SI->getPointerOperand();
148   }
149 
150   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
151     if (!AllowVolatile && CXI->isVolatile())
152       return nullptr;
153     return CXI->getPointerOperand();
154   }
155 
156   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
157     if (!AllowVolatile && RMWI->isVolatile())
158       return nullptr;
159     return RMWI->getPointerOperand();
160   }
161 
162   return nullptr;
163 }
164 
165 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
166 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
167 /// getelement pointer instructions that traverse the natural type of \p Ptr if
168 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
169 /// through a cast to i8*.
170 ///
171 /// TODO: This could probably live somewhere more prominantly if it doesn't
172 ///       already exist.
173 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
174                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
175   assert(Offset >= 0 && "Negative offset not supported yet!");
176   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
177                     << "-bytes as " << *ResTy << "\n");
178 
179   // The initial type we are trying to traverse to get nice GEPs.
180   Type *Ty = Ptr->getType();
181 
182   SmallVector<Value *, 4> Indices;
183   std::string GEPName = Ptr->getName().str();
184   while (Offset) {
185     uint64_t Idx, Rem;
186 
187     if (auto *STy = dyn_cast<StructType>(Ty)) {
188       const StructLayout *SL = DL.getStructLayout(STy);
189       if (int64_t(SL->getSizeInBytes()) < Offset)
190         break;
191       Idx = SL->getElementContainingOffset(Offset);
192       assert(Idx < STy->getNumElements() && "Offset calculation error!");
193       Rem = Offset - SL->getElementOffset(Idx);
194       Ty = STy->getElementType(Idx);
195     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
196       Ty = PTy->getElementType();
197       if (!Ty->isSized())
198         break;
199       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
200       assert(ElementSize && "Expected type with size!");
201       Idx = Offset / ElementSize;
202       Rem = Offset % ElementSize;
203     } else {
204       // Non-aggregate type, we cast and make byte-wise progress now.
205       break;
206     }
207 
208     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
209                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
210 
211     GEPName += "." + std::to_string(Idx);
212     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
213     Offset = Rem;
214   }
215 
216   // Create a GEP if we collected indices above.
217   if (Indices.size())
218     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
219 
220   // If an offset is left we use byte-wise adjustment.
221   if (Offset) {
222     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
223     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
224                         GEPName + ".b" + Twine(Offset));
225   }
226 
227   // Ensure the result has the requested type.
228   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
229 
230   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
231   return Ptr;
232 }
233 
234 /// Recursively visit all values that might become \p IRP at some point. This
235 /// will be done by looking through cast instructions, selects, phis, and calls
236 /// with the "returned" attribute. Once we cannot look through the value any
237 /// further, the callback \p VisitValueCB is invoked and passed the current
238 /// value, the \p State, and a flag to indicate if we stripped anything.
239 /// Stripped means that we unpacked the value associated with \p IRP at least
240 /// once. Note that the value used for the callback may still be the value
241 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
242 /// we will never visit more values than specified by \p MaxValues.
243 template <typename AAType, typename StateTy>
244 static bool genericValueTraversal(
245     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
246     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
247         VisitValueCB,
248     const Instruction *CtxI, int MaxValues = 16,
249     function_ref<Value *(Value *)> StripCB = nullptr) {
250 
251   const AAIsDead *LivenessAA = nullptr;
252   if (IRP.getAnchorScope())
253     LivenessAA = &A.getAAFor<AAIsDead>(
254         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
255         /* TrackDependence */ false);
256   bool AnyDead = false;
257 
258   using Item = std::pair<Value *, const Instruction *>;
259   SmallSet<Item, 16> Visited;
260   SmallVector<Item, 16> Worklist;
261   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
262 
263   int Iteration = 0;
264   do {
265     Item I = Worklist.pop_back_val();
266     Value *V = I.first;
267     CtxI = I.second;
268     if (StripCB)
269       V = StripCB(V);
270 
271     // Check if we should process the current value. To prevent endless
272     // recursion keep a record of the values we followed!
273     if (!Visited.insert(I).second)
274       continue;
275 
276     // Make sure we limit the compile time for complex expressions.
277     if (Iteration++ >= MaxValues)
278       return false;
279 
280     // Explicitly look through calls with a "returned" attribute if we do
281     // not have a pointer as stripPointerCasts only works on them.
282     Value *NewV = nullptr;
283     if (V->getType()->isPointerTy()) {
284       NewV = V->stripPointerCasts();
285     } else {
286       auto *CB = dyn_cast<CallBase>(V);
287       if (CB && CB->getCalledFunction()) {
288         for (Argument &Arg : CB->getCalledFunction()->args())
289           if (Arg.hasReturnedAttr()) {
290             NewV = CB->getArgOperand(Arg.getArgNo());
291             break;
292           }
293       }
294     }
295     if (NewV && NewV != V) {
296       Worklist.push_back({NewV, CtxI});
297       continue;
298     }
299 
300     // Look through select instructions, visit both potential values.
301     if (auto *SI = dyn_cast<SelectInst>(V)) {
302       Worklist.push_back({SI->getTrueValue(), CtxI});
303       Worklist.push_back({SI->getFalseValue(), CtxI});
304       continue;
305     }
306 
307     // Look through phi nodes, visit all live operands.
308     if (auto *PHI = dyn_cast<PHINode>(V)) {
309       assert(LivenessAA &&
310              "Expected liveness in the presence of instructions!");
311       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
312         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
313         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
314                             LivenessAA,
315                             /* CheckBBLivenessOnly */ true)) {
316           AnyDead = true;
317           continue;
318         }
319         Worklist.push_back(
320             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
321       }
322       continue;
323     }
324 
325     // Once a leaf is reached we inform the user through the callback.
326     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
327       return false;
328   } while (!Worklist.empty());
329 
330   // If we actually used liveness information so we have to record a dependence.
331   if (AnyDead)
332     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
333 
334   // All values have been visited.
335   return true;
336 }
337 
338 static const Value *
339 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
340                                      const DataLayout &DL,
341                                      bool AllowNonInbounds = false) {
342   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
343   if (!Ptr)
344     return nullptr;
345 
346   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
347                                           AllowNonInbounds);
348 }
349 
350 /// Helper function to clamp a state \p S of type \p StateType with the
351 /// information in \p R and indicate/return if \p S did change (as-in update is
352 /// required to be run again).
353 template <typename StateType>
354 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
355   auto Assumed = S.getAssumed();
356   S ^= R;
357   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
358                                    : ChangeStatus::CHANGED;
359 }
360 
361 /// Clamp the information known for all returned values of a function
362 /// (identified by \p QueryingAA) into \p S.
363 template <typename AAType, typename StateType = typename AAType::StateType>
364 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
365                                      StateType &S) {
366   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
367                     << QueryingAA << " into " << S << "\n");
368 
369   assert((QueryingAA.getIRPosition().getPositionKind() ==
370               IRPosition::IRP_RETURNED ||
371           QueryingAA.getIRPosition().getPositionKind() ==
372               IRPosition::IRP_CALL_SITE_RETURNED) &&
373          "Can only clamp returned value states for a function returned or call "
374          "site returned position!");
375 
376   // Use an optional state as there might not be any return values and we want
377   // to join (IntegerState::operator&) the state of all there are.
378   Optional<StateType> T;
379 
380   // Callback for each possibly returned value.
381   auto CheckReturnValue = [&](Value &RV) -> bool {
382     const IRPosition &RVPos = IRPosition::value(RV);
383     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
384     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
385                       << " @ " << RVPos << "\n");
386     const StateType &AAS = static_cast<const StateType &>(AA.getState());
387     if (T.hasValue())
388       *T &= AAS;
389     else
390       T = AAS;
391     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
392                       << "\n");
393     return T->isValidState();
394   };
395 
396   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
397     S.indicatePessimisticFixpoint();
398   else if (T.hasValue())
399     S ^= *T;
400 }
401 
402 /// Helper class to compose two generic deduction
403 template <typename AAType, typename Base, typename StateType,
404           template <typename...> class F, template <typename...> class G>
405 struct AAComposeTwoGenericDeduction
406     : public F<AAType, G<AAType, Base, StateType>, StateType> {
407   AAComposeTwoGenericDeduction(const IRPosition &IRP, Attributor &A)
408       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP, A) {}
409 
410   void initialize(Attributor &A) override {
411     F<AAType, G<AAType, Base, StateType>, StateType>::initialize(A);
412     G<AAType, Base, StateType>::initialize(A);
413   }
414 
415   /// See AbstractAttribute::updateImpl(...).
416   ChangeStatus updateImpl(Attributor &A) override {
417     ChangeStatus ChangedF =
418         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
419     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
420     return ChangedF | ChangedG;
421   }
422 };
423 
424 /// Helper class for generic deduction: return value -> returned position.
425 template <typename AAType, typename Base,
426           typename StateType = typename Base::StateType>
427 struct AAReturnedFromReturnedValues : public Base {
428   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
429       : Base(IRP, A) {}
430 
431   /// See AbstractAttribute::updateImpl(...).
432   ChangeStatus updateImpl(Attributor &A) override {
433     StateType S(StateType::getBestState(this->getState()));
434     clampReturnedValueStates<AAType, StateType>(A, *this, S);
435     // TODO: If we know we visited all returned values, thus no are assumed
436     // dead, we can take the known information from the state T.
437     return clampStateAndIndicateChange<StateType>(this->getState(), S);
438   }
439 };
440 
441 /// Clamp the information known at all call sites for a given argument
442 /// (identified by \p QueryingAA) into \p S.
443 template <typename AAType, typename StateType = typename AAType::StateType>
444 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
445                                         StateType &S) {
446   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
447                     << QueryingAA << " into " << S << "\n");
448 
449   assert(QueryingAA.getIRPosition().getPositionKind() ==
450              IRPosition::IRP_ARGUMENT &&
451          "Can only clamp call site argument states for an argument position!");
452 
453   // Use an optional state as there might not be any return values and we want
454   // to join (IntegerState::operator&) the state of all there are.
455   Optional<StateType> T;
456 
457   // The argument number which is also the call site argument number.
458   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
459 
460   auto CallSiteCheck = [&](AbstractCallSite ACS) {
461     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
462     // Check if a coresponding argument was found or if it is on not associated
463     // (which can happen for callback calls).
464     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
465       return false;
466 
467     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
468     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
469                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
470     const StateType &AAS = static_cast<const StateType &>(AA.getState());
471     if (T.hasValue())
472       *T &= AAS;
473     else
474       T = AAS;
475     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
476                       << "\n");
477     return T->isValidState();
478   };
479 
480   bool AllCallSitesKnown;
481   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
482                               AllCallSitesKnown))
483     S.indicatePessimisticFixpoint();
484   else if (T.hasValue())
485     S ^= *T;
486 }
487 
488 /// Helper class for generic deduction: call site argument -> argument position.
489 template <typename AAType, typename Base,
490           typename StateType = typename AAType::StateType>
491 struct AAArgumentFromCallSiteArguments : public Base {
492   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
493       : Base(IRP, A) {}
494 
495   /// See AbstractAttribute::updateImpl(...).
496   ChangeStatus updateImpl(Attributor &A) override {
497     StateType S(StateType::getBestState(this->getState()));
498     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
499     // TODO: If we know we visited all incoming values, thus no are assumed
500     // dead, we can take the known information from the state T.
501     return clampStateAndIndicateChange<StateType>(this->getState(), S);
502   }
503 };
504 
505 /// Helper class for generic replication: function returned -> cs returned.
506 template <typename AAType, typename Base,
507           typename StateType = typename Base::StateType>
508 struct AACallSiteReturnedFromReturned : public Base {
509   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
510       : Base(IRP, A) {}
511 
512   /// See AbstractAttribute::updateImpl(...).
513   ChangeStatus updateImpl(Attributor &A) override {
514     assert(this->getIRPosition().getPositionKind() ==
515                IRPosition::IRP_CALL_SITE_RETURNED &&
516            "Can only wrap function returned positions for call site returned "
517            "positions!");
518     auto &S = this->getState();
519 
520     const Function *AssociatedFunction =
521         this->getIRPosition().getAssociatedFunction();
522     if (!AssociatedFunction)
523       return S.indicatePessimisticFixpoint();
524 
525     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
526     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
527     return clampStateAndIndicateChange(
528         S, static_cast<const StateType &>(AA.getState()));
529   }
530 };
531 
532 /// Helper class for generic deduction using must-be-executed-context
533 /// Base class is required to have `followUse` method.
534 
535 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
536 /// U - Underlying use.
537 /// I - The user of the \p U.
538 /// `followUse` returns true if the value should be tracked transitively.
539 
540 template <typename AAType, typename Base,
541           typename StateType = typename AAType::StateType>
542 struct AAFromMustBeExecutedContext : public Base {
543   AAFromMustBeExecutedContext(const IRPosition &IRP, Attributor &A)
544       : Base(IRP, A) {}
545 
546   void initialize(Attributor &A) override {
547     Base::initialize(A);
548     const IRPosition &IRP = this->getIRPosition();
549     Instruction *CtxI = IRP.getCtxI();
550 
551     if (!CtxI)
552       return;
553 
554     for (const Use &U : IRP.getAssociatedValue().uses())
555       Uses.insert(&U);
556   }
557 
558   /// Helper function to accumulate uses.
559   void followUsesInContext(Attributor &A,
560                            MustBeExecutedContextExplorer &Explorer,
561                            const Instruction *CtxI,
562                            SetVector<const Use *> &Uses, StateType &State) {
563     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
564     for (unsigned u = 0; u < Uses.size(); ++u) {
565       const Use *U = Uses[u];
566       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
567         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
568         if (Found && Base::followUse(A, U, UserI, State))
569           for (const Use &Us : UserI->uses())
570             Uses.insert(&Us);
571       }
572     }
573   }
574 
575   /// See AbstractAttribute::updateImpl(...).
576   ChangeStatus updateImpl(Attributor &A) override {
577     auto BeforeState = this->getState();
578     auto &S = this->getState();
579     Instruction *CtxI = this->getIRPosition().getCtxI();
580     if (!CtxI)
581       return ChangeStatus::UNCHANGED;
582 
583     MustBeExecutedContextExplorer &Explorer =
584         A.getInfoCache().getMustBeExecutedContextExplorer();
585 
586     followUsesInContext(A, Explorer, CtxI, Uses, S);
587 
588     if (this->isAtFixpoint())
589       return ChangeStatus::CHANGED;
590 
591     SmallVector<const BranchInst *, 4> BrInsts;
592     auto Pred = [&](const Instruction *I) {
593       if (const BranchInst *Br = dyn_cast<BranchInst>(I))
594         if (Br->isConditional())
595           BrInsts.push_back(Br);
596       return true;
597     };
598 
599     // Here, accumulate conditional branch instructions in the context. We
600     // explore the child paths and collect the known states. The disjunction of
601     // those states can be merged to its own state. Let ParentState_i be a state
602     // to indicate the known information for an i-th branch instruction in the
603     // context. ChildStates are created for its successors respectively.
604     //
605     // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
606     // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
607     //      ...
608     // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
609     //
610     // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
611     //
612     // FIXME: Currently, recursive branches are not handled. For example, we
613     // can't deduce that ptr must be dereferenced in below function.
614     //
615     // void f(int a, int c, int *ptr) {
616     //    if(a)
617     //      if (b) {
618     //        *ptr = 0;
619     //      } else {
620     //        *ptr = 1;
621     //      }
622     //    else {
623     //      if (b) {
624     //        *ptr = 0;
625     //      } else {
626     //        *ptr = 1;
627     //      }
628     //    }
629     // }
630 
631     Explorer.checkForAllContext(CtxI, Pred);
632     for (const BranchInst *Br : BrInsts) {
633       StateType ParentState;
634 
635       // The known state of the parent state is a conjunction of children's
636       // known states so it is initialized with a best state.
637       ParentState.indicateOptimisticFixpoint();
638 
639       for (const BasicBlock *BB : Br->successors()) {
640         StateType ChildState;
641 
642         size_t BeforeSize = Uses.size();
643         followUsesInContext(A, Explorer, &BB->front(), Uses, ChildState);
644 
645         // Erase uses which only appear in the child.
646         for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
647           It = Uses.erase(It);
648 
649         ParentState &= ChildState;
650       }
651 
652       // Use only known state.
653       S += ParentState;
654     }
655 
656     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
657   }
658 
659 private:
660   /// Container for (transitive) uses of the associated value.
661   SetVector<const Use *> Uses;
662 };
663 
664 template <typename AAType, typename Base,
665           typename StateType = typename AAType::StateType>
666 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
667     AAComposeTwoGenericDeduction<AAType, Base, StateType,
668                                  AAFromMustBeExecutedContext,
669                                  AAArgumentFromCallSiteArguments>;
670 
671 template <typename AAType, typename Base,
672           typename StateType = typename AAType::StateType>
673 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
674     AAComposeTwoGenericDeduction<AAType, Base, StateType,
675                                  AAFromMustBeExecutedContext,
676                                  AACallSiteReturnedFromReturned>;
677 
678 /// -----------------------NoUnwind Function Attribute--------------------------
679 
680 struct AANoUnwindImpl : AANoUnwind {
681   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
682 
683   const std::string getAsStr() const override {
684     return getAssumed() ? "nounwind" : "may-unwind";
685   }
686 
687   /// See AbstractAttribute::updateImpl(...).
688   ChangeStatus updateImpl(Attributor &A) override {
689     auto Opcodes = {
690         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
691         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
692         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
693 
694     auto CheckForNoUnwind = [&](Instruction &I) {
695       if (!I.mayThrow())
696         return true;
697 
698       if (const auto *CB = dyn_cast<CallBase>(&I)) {
699         const auto &NoUnwindAA =
700             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
701         return NoUnwindAA.isAssumedNoUnwind();
702       }
703       return false;
704     };
705 
706     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
707       return indicatePessimisticFixpoint();
708 
709     return ChangeStatus::UNCHANGED;
710   }
711 };
712 
713 struct AANoUnwindFunction final : public AANoUnwindImpl {
714   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
715       : AANoUnwindImpl(IRP, A) {}
716 
717   /// See AbstractAttribute::trackStatistics()
718   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
719 };
720 
721 /// NoUnwind attribute deduction for a call sites.
722 struct AANoUnwindCallSite final : AANoUnwindImpl {
723   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
724       : AANoUnwindImpl(IRP, A) {}
725 
726   /// See AbstractAttribute::initialize(...).
727   void initialize(Attributor &A) override {
728     AANoUnwindImpl::initialize(A);
729     Function *F = getAssociatedFunction();
730     if (!F)
731       indicatePessimisticFixpoint();
732   }
733 
734   /// See AbstractAttribute::updateImpl(...).
735   ChangeStatus updateImpl(Attributor &A) override {
736     // TODO: Once we have call site specific value information we can provide
737     //       call site specific liveness information and then it makes
738     //       sense to specialize attributes for call sites arguments instead of
739     //       redirecting requests to the callee argument.
740     Function *F = getAssociatedFunction();
741     const IRPosition &FnPos = IRPosition::function(*F);
742     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
743     return clampStateAndIndicateChange(
744         getState(),
745         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
746   }
747 
748   /// See AbstractAttribute::trackStatistics()
749   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
750 };
751 
752 /// --------------------- Function Return Values -------------------------------
753 
754 /// "Attribute" that collects all potential returned values and the return
755 /// instructions that they arise from.
756 ///
757 /// If there is a unique returned value R, the manifest method will:
758 ///   - mark R with the "returned" attribute, if R is an argument.
759 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
760 
761   /// Mapping of values potentially returned by the associated function to the
762   /// return instructions that might return them.
763   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
764 
765   /// Mapping to remember the number of returned values for a call site such
766   /// that we can avoid updates if nothing changed.
767   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
768 
769   /// Set of unresolved calls returned by the associated function.
770   SmallSetVector<CallBase *, 4> UnresolvedCalls;
771 
772   /// State flags
773   ///
774   ///{
775   bool IsFixed = false;
776   bool IsValidState = true;
777   ///}
778 
779 public:
780   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
781       : AAReturnedValues(IRP, A) {}
782 
783   /// See AbstractAttribute::initialize(...).
784   void initialize(Attributor &A) override {
785     // Reset the state.
786     IsFixed = false;
787     IsValidState = true;
788     ReturnedValues.clear();
789 
790     Function *F = getAssociatedFunction();
791     if (!F) {
792       indicatePessimisticFixpoint();
793       return;
794     }
795     assert(!F->getReturnType()->isVoidTy() &&
796            "Did not expect a void return type!");
797 
798     // The map from instruction opcodes to those instructions in the function.
799     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
800 
801     // Look through all arguments, if one is marked as returned we are done.
802     for (Argument &Arg : F->args()) {
803       if (Arg.hasReturnedAttr()) {
804         auto &ReturnInstSet = ReturnedValues[&Arg];
805         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
806           for (Instruction *RI : *Insts)
807             ReturnInstSet.insert(cast<ReturnInst>(RI));
808 
809         indicateOptimisticFixpoint();
810         return;
811       }
812     }
813 
814     if (!A.isFunctionIPOAmendable(*F))
815       indicatePessimisticFixpoint();
816   }
817 
818   /// See AbstractAttribute::manifest(...).
819   ChangeStatus manifest(Attributor &A) override;
820 
821   /// See AbstractAttribute::getState(...).
822   AbstractState &getState() override { return *this; }
823 
824   /// See AbstractAttribute::getState(...).
825   const AbstractState &getState() const override { return *this; }
826 
827   /// See AbstractAttribute::updateImpl(Attributor &A).
828   ChangeStatus updateImpl(Attributor &A) override;
829 
830   llvm::iterator_range<iterator> returned_values() override {
831     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
832   }
833 
834   llvm::iterator_range<const_iterator> returned_values() const override {
835     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
836   }
837 
838   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
839     return UnresolvedCalls;
840   }
841 
842   /// Return the number of potential return values, -1 if unknown.
843   size_t getNumReturnValues() const override {
844     return isValidState() ? ReturnedValues.size() : -1;
845   }
846 
847   /// Return an assumed unique return value if a single candidate is found. If
848   /// there cannot be one, return a nullptr. If it is not clear yet, return the
849   /// Optional::NoneType.
850   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
851 
852   /// See AbstractState::checkForAllReturnedValues(...).
853   bool checkForAllReturnedValuesAndReturnInsts(
854       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
855       const override;
856 
857   /// Pretty print the attribute similar to the IR representation.
858   const std::string getAsStr() const override;
859 
860   /// See AbstractState::isAtFixpoint().
861   bool isAtFixpoint() const override { return IsFixed; }
862 
863   /// See AbstractState::isValidState().
864   bool isValidState() const override { return IsValidState; }
865 
866   /// See AbstractState::indicateOptimisticFixpoint(...).
867   ChangeStatus indicateOptimisticFixpoint() override {
868     IsFixed = true;
869     return ChangeStatus::UNCHANGED;
870   }
871 
872   ChangeStatus indicatePessimisticFixpoint() override {
873     IsFixed = true;
874     IsValidState = false;
875     return ChangeStatus::CHANGED;
876   }
877 };
878 
879 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
880   ChangeStatus Changed = ChangeStatus::UNCHANGED;
881 
882   // Bookkeeping.
883   assert(isValidState());
884   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
885                   "Number of function with known return values");
886 
887   // Check if we have an assumed unique return value that we could manifest.
888   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
889 
890   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
891     return Changed;
892 
893   // Bookkeeping.
894   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
895                   "Number of function with unique return");
896 
897   // Callback to replace the uses of CB with the constant C.
898   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
899     if (CB.getNumUses() == 0)
900       return ChangeStatus::UNCHANGED;
901     if (A.changeValueAfterManifest(CB, C))
902       return ChangeStatus::CHANGED;
903     return ChangeStatus::UNCHANGED;
904   };
905 
906   // If the assumed unique return value is an argument, annotate it.
907   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
908     // TODO: This should be handled differently!
909     this->AnchorVal = UniqueRVArg;
910     this->KindOrArgNo = UniqueRVArg->getArgNo();
911     Changed = IRAttribute::manifest(A);
912   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
913     // We can replace the returned value with the unique returned constant.
914     Value &AnchorValue = getAnchorValue();
915     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
916       for (const Use &U : F->uses())
917         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
918           if (CB->isCallee(&U)) {
919             Constant *RVCCast =
920                 CB->getType() == RVC->getType()
921                     ? RVC
922                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
923             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
924           }
925     } else {
926       assert(isa<CallBase>(AnchorValue) &&
927              "Expcected a function or call base anchor!");
928       Constant *RVCCast =
929           AnchorValue.getType() == RVC->getType()
930               ? RVC
931               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
932       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
933     }
934     if (Changed == ChangeStatus::CHANGED)
935       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
936                       "Number of function returns replaced by constant return");
937   }
938 
939   return Changed;
940 }
941 
942 const std::string AAReturnedValuesImpl::getAsStr() const {
943   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
944          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
945          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
946 }
947 
948 Optional<Value *>
949 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
950   // If checkForAllReturnedValues provides a unique value, ignoring potential
951   // undef values that can also be present, it is assumed to be the actual
952   // return value and forwarded to the caller of this method. If there are
953   // multiple, a nullptr is returned indicating there cannot be a unique
954   // returned value.
955   Optional<Value *> UniqueRV;
956 
957   auto Pred = [&](Value &RV) -> bool {
958     // If we found a second returned value and neither the current nor the saved
959     // one is an undef, there is no unique returned value. Undefs are special
960     // since we can pretend they have any value.
961     if (UniqueRV.hasValue() && UniqueRV != &RV &&
962         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
963       UniqueRV = nullptr;
964       return false;
965     }
966 
967     // Do not overwrite a value with an undef.
968     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
969       UniqueRV = &RV;
970 
971     return true;
972   };
973 
974   if (!A.checkForAllReturnedValues(Pred, *this))
975     UniqueRV = nullptr;
976 
977   return UniqueRV;
978 }
979 
980 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
981     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
982     const {
983   if (!isValidState())
984     return false;
985 
986   // Check all returned values but ignore call sites as long as we have not
987   // encountered an overdefined one during an update.
988   for (auto &It : ReturnedValues) {
989     Value *RV = It.first;
990 
991     CallBase *CB = dyn_cast<CallBase>(RV);
992     if (CB && !UnresolvedCalls.count(CB))
993       continue;
994 
995     if (!Pred(*RV, It.second))
996       return false;
997   }
998 
999   return true;
1000 }
1001 
1002 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1003   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1004   bool Changed = false;
1005 
1006   // State used in the value traversals starting in returned values.
1007   struct RVState {
1008     // The map in which we collect return values -> return instrs.
1009     decltype(ReturnedValues) &RetValsMap;
1010     // The flag to indicate a change.
1011     bool &Changed;
1012     // The return instrs we come from.
1013     SmallSetVector<ReturnInst *, 4> RetInsts;
1014   };
1015 
1016   // Callback for a leaf value returned by the associated function.
1017   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1018                          bool) -> bool {
1019     auto Size = RVS.RetValsMap[&Val].size();
1020     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1021     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1022     RVS.Changed |= Inserted;
1023     LLVM_DEBUG({
1024       if (Inserted)
1025         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1026                << " => " << RVS.RetInsts.size() << "\n";
1027     });
1028     return true;
1029   };
1030 
1031   // Helper method to invoke the generic value traversal.
1032   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1033                                 const Instruction *CtxI) {
1034     IRPosition RetValPos = IRPosition::value(RV);
1035     return genericValueTraversal<AAReturnedValues, RVState>(
1036         A, RetValPos, *this, RVS, VisitValueCB, CtxI);
1037   };
1038 
1039   // Callback for all "return intructions" live in the associated function.
1040   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1041     ReturnInst &Ret = cast<ReturnInst>(I);
1042     RVState RVS({ReturnedValues, Changed, {}});
1043     RVS.RetInsts.insert(&Ret);
1044     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1045   };
1046 
1047   // Start by discovering returned values from all live returned instructions in
1048   // the associated function.
1049   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1050     return indicatePessimisticFixpoint();
1051 
1052   // Once returned values "directly" present in the code are handled we try to
1053   // resolve returned calls.
1054   decltype(ReturnedValues) NewRVsMap;
1055   for (auto &It : ReturnedValues) {
1056     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1057                       << " by #" << It.second.size() << " RIs\n");
1058     CallBase *CB = dyn_cast<CallBase>(It.first);
1059     if (!CB || UnresolvedCalls.count(CB))
1060       continue;
1061 
1062     if (!CB->getCalledFunction()) {
1063       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1064                         << "\n");
1065       UnresolvedCalls.insert(CB);
1066       continue;
1067     }
1068 
1069     // TODO: use the function scope once we have call site AAReturnedValues.
1070     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1071         *this, IRPosition::function(*CB->getCalledFunction()));
1072     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1073                       << RetValAA << "\n");
1074 
1075     // Skip dead ends, thus if we do not know anything about the returned
1076     // call we mark it as unresolved and it will stay that way.
1077     if (!RetValAA.getState().isValidState()) {
1078       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1079                         << "\n");
1080       UnresolvedCalls.insert(CB);
1081       continue;
1082     }
1083 
1084     // Do not try to learn partial information. If the callee has unresolved
1085     // return values we will treat the call as unresolved/opaque.
1086     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1087     if (!RetValAAUnresolvedCalls.empty()) {
1088       UnresolvedCalls.insert(CB);
1089       continue;
1090     }
1091 
1092     // Now check if we can track transitively returned values. If possible, thus
1093     // if all return value can be represented in the current scope, do so.
1094     bool Unresolved = false;
1095     for (auto &RetValAAIt : RetValAA.returned_values()) {
1096       Value *RetVal = RetValAAIt.first;
1097       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1098           isa<Constant>(RetVal))
1099         continue;
1100       // Anything that did not fit in the above categories cannot be resolved,
1101       // mark the call as unresolved.
1102       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1103                            "cannot be translated: "
1104                         << *RetVal << "\n");
1105       UnresolvedCalls.insert(CB);
1106       Unresolved = true;
1107       break;
1108     }
1109 
1110     if (Unresolved)
1111       continue;
1112 
1113     // Now track transitively returned values.
1114     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1115     if (NumRetAA == RetValAA.getNumReturnValues()) {
1116       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1117                            "changed since it was seen last\n");
1118       continue;
1119     }
1120     NumRetAA = RetValAA.getNumReturnValues();
1121 
1122     for (auto &RetValAAIt : RetValAA.returned_values()) {
1123       Value *RetVal = RetValAAIt.first;
1124       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1125         // Arguments are mapped to call site operands and we begin the traversal
1126         // again.
1127         bool Unused = false;
1128         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1129         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1130         continue;
1131       } else if (isa<CallBase>(RetVal)) {
1132         // Call sites are resolved by the callee attribute over time, no need to
1133         // do anything for us.
1134         continue;
1135       } else if (isa<Constant>(RetVal)) {
1136         // Constants are valid everywhere, we can simply take them.
1137         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1138         continue;
1139       }
1140     }
1141   }
1142 
1143   // To avoid modifications to the ReturnedValues map while we iterate over it
1144   // we kept record of potential new entries in a copy map, NewRVsMap.
1145   for (auto &It : NewRVsMap) {
1146     assert(!It.second.empty() && "Entry does not add anything.");
1147     auto &ReturnInsts = ReturnedValues[It.first];
1148     for (ReturnInst *RI : It.second)
1149       if (ReturnInsts.insert(RI)) {
1150         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1151                           << *It.first << " => " << *RI << "\n");
1152         Changed = true;
1153       }
1154   }
1155 
1156   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1157   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1158 }
1159 
1160 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1161   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1162       : AAReturnedValuesImpl(IRP, A) {}
1163 
1164   /// See AbstractAttribute::trackStatistics()
1165   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1166 };
1167 
1168 /// Returned values information for a call sites.
1169 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1170   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1171       : AAReturnedValuesImpl(IRP, A) {}
1172 
1173   /// See AbstractAttribute::initialize(...).
1174   void initialize(Attributor &A) override {
1175     // TODO: Once we have call site specific value information we can provide
1176     //       call site specific liveness information and then it makes
1177     //       sense to specialize attributes for call sites instead of
1178     //       redirecting requests to the callee.
1179     llvm_unreachable("Abstract attributes for returned values are not "
1180                      "supported for call sites yet!");
1181   }
1182 
1183   /// See AbstractAttribute::updateImpl(...).
1184   ChangeStatus updateImpl(Attributor &A) override {
1185     return indicatePessimisticFixpoint();
1186   }
1187 
1188   /// See AbstractAttribute::trackStatistics()
1189   void trackStatistics() const override {}
1190 };
1191 
1192 /// ------------------------ NoSync Function Attribute -------------------------
1193 
1194 struct AANoSyncImpl : AANoSync {
1195   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1196 
1197   const std::string getAsStr() const override {
1198     return getAssumed() ? "nosync" : "may-sync";
1199   }
1200 
1201   /// See AbstractAttribute::updateImpl(...).
1202   ChangeStatus updateImpl(Attributor &A) override;
1203 
1204   /// Helper function used to determine whether an instruction is non-relaxed
1205   /// atomic. In other words, if an atomic instruction does not have unordered
1206   /// or monotonic ordering
1207   static bool isNonRelaxedAtomic(Instruction *I);
1208 
1209   /// Helper function used to determine whether an instruction is volatile.
1210   static bool isVolatile(Instruction *I);
1211 
1212   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1213   /// memset).
1214   static bool isNoSyncIntrinsic(Instruction *I);
1215 };
1216 
1217 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1218   if (!I->isAtomic())
1219     return false;
1220 
1221   AtomicOrdering Ordering;
1222   switch (I->getOpcode()) {
1223   case Instruction::AtomicRMW:
1224     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1225     break;
1226   case Instruction::Store:
1227     Ordering = cast<StoreInst>(I)->getOrdering();
1228     break;
1229   case Instruction::Load:
1230     Ordering = cast<LoadInst>(I)->getOrdering();
1231     break;
1232   case Instruction::Fence: {
1233     auto *FI = cast<FenceInst>(I);
1234     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1235       return false;
1236     Ordering = FI->getOrdering();
1237     break;
1238   }
1239   case Instruction::AtomicCmpXchg: {
1240     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1241     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1242     // Only if both are relaxed, than it can be treated as relaxed.
1243     // Otherwise it is non-relaxed.
1244     if (Success != AtomicOrdering::Unordered &&
1245         Success != AtomicOrdering::Monotonic)
1246       return true;
1247     if (Failure != AtomicOrdering::Unordered &&
1248         Failure != AtomicOrdering::Monotonic)
1249       return true;
1250     return false;
1251   }
1252   default:
1253     llvm_unreachable(
1254         "New atomic operations need to be known in the attributor.");
1255   }
1256 
1257   // Relaxed.
1258   if (Ordering == AtomicOrdering::Unordered ||
1259       Ordering == AtomicOrdering::Monotonic)
1260     return false;
1261   return true;
1262 }
1263 
1264 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1265 /// FIXME: We should ipmrove the handling of intrinsics.
1266 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1267   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1268     switch (II->getIntrinsicID()) {
1269     /// Element wise atomic memory intrinsics are can only be unordered,
1270     /// therefore nosync.
1271     case Intrinsic::memset_element_unordered_atomic:
1272     case Intrinsic::memmove_element_unordered_atomic:
1273     case Intrinsic::memcpy_element_unordered_atomic:
1274       return true;
1275     case Intrinsic::memset:
1276     case Intrinsic::memmove:
1277     case Intrinsic::memcpy:
1278       if (!cast<MemIntrinsic>(II)->isVolatile())
1279         return true;
1280       return false;
1281     default:
1282       return false;
1283     }
1284   }
1285   return false;
1286 }
1287 
1288 bool AANoSyncImpl::isVolatile(Instruction *I) {
1289   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1290 
1291   switch (I->getOpcode()) {
1292   case Instruction::AtomicRMW:
1293     return cast<AtomicRMWInst>(I)->isVolatile();
1294   case Instruction::Store:
1295     return cast<StoreInst>(I)->isVolatile();
1296   case Instruction::Load:
1297     return cast<LoadInst>(I)->isVolatile();
1298   case Instruction::AtomicCmpXchg:
1299     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1300   default:
1301     return false;
1302   }
1303 }
1304 
1305 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1306 
1307   auto CheckRWInstForNoSync = [&](Instruction &I) {
1308     /// We are looking for volatile instructions or Non-Relaxed atomics.
1309     /// FIXME: We should improve the handling of intrinsics.
1310 
1311     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1312       return true;
1313 
1314     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1315       if (CB->hasFnAttr(Attribute::NoSync))
1316         return true;
1317 
1318       const auto &NoSyncAA =
1319           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1320       if (NoSyncAA.isAssumedNoSync())
1321         return true;
1322       return false;
1323     }
1324 
1325     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1326       return true;
1327 
1328     return false;
1329   };
1330 
1331   auto CheckForNoSync = [&](Instruction &I) {
1332     // At this point we handled all read/write effects and they are all
1333     // nosync, so they can be skipped.
1334     if (I.mayReadOrWriteMemory())
1335       return true;
1336 
1337     // non-convergent and readnone imply nosync.
1338     return !cast<CallBase>(I).isConvergent();
1339   };
1340 
1341   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1342       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1343     return indicatePessimisticFixpoint();
1344 
1345   return ChangeStatus::UNCHANGED;
1346 }
1347 
1348 struct AANoSyncFunction final : public AANoSyncImpl {
1349   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1350       : AANoSyncImpl(IRP, A) {}
1351 
1352   /// See AbstractAttribute::trackStatistics()
1353   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1354 };
1355 
1356 /// NoSync attribute deduction for a call sites.
1357 struct AANoSyncCallSite final : AANoSyncImpl {
1358   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1359       : AANoSyncImpl(IRP, A) {}
1360 
1361   /// See AbstractAttribute::initialize(...).
1362   void initialize(Attributor &A) override {
1363     AANoSyncImpl::initialize(A);
1364     Function *F = getAssociatedFunction();
1365     if (!F)
1366       indicatePessimisticFixpoint();
1367   }
1368 
1369   /// See AbstractAttribute::updateImpl(...).
1370   ChangeStatus updateImpl(Attributor &A) override {
1371     // TODO: Once we have call site specific value information we can provide
1372     //       call site specific liveness information and then it makes
1373     //       sense to specialize attributes for call sites arguments instead of
1374     //       redirecting requests to the callee argument.
1375     Function *F = getAssociatedFunction();
1376     const IRPosition &FnPos = IRPosition::function(*F);
1377     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1378     return clampStateAndIndicateChange(
1379         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1380   }
1381 
1382   /// See AbstractAttribute::trackStatistics()
1383   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1384 };
1385 
1386 /// ------------------------ No-Free Attributes ----------------------------
1387 
1388 struct AANoFreeImpl : public AANoFree {
1389   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1390 
1391   /// See AbstractAttribute::updateImpl(...).
1392   ChangeStatus updateImpl(Attributor &A) override {
1393     auto CheckForNoFree = [&](Instruction &I) {
1394       const auto &CB = cast<CallBase>(I);
1395       if (CB.hasFnAttr(Attribute::NoFree))
1396         return true;
1397 
1398       const auto &NoFreeAA =
1399           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1400       return NoFreeAA.isAssumedNoFree();
1401     };
1402 
1403     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1404       return indicatePessimisticFixpoint();
1405     return ChangeStatus::UNCHANGED;
1406   }
1407 
1408   /// See AbstractAttribute::getAsStr().
1409   const std::string getAsStr() const override {
1410     return getAssumed() ? "nofree" : "may-free";
1411   }
1412 };
1413 
1414 struct AANoFreeFunction final : public AANoFreeImpl {
1415   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1416       : AANoFreeImpl(IRP, A) {}
1417 
1418   /// See AbstractAttribute::trackStatistics()
1419   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1420 };
1421 
1422 /// NoFree attribute deduction for a call sites.
1423 struct AANoFreeCallSite final : AANoFreeImpl {
1424   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1425       : AANoFreeImpl(IRP, A) {}
1426 
1427   /// See AbstractAttribute::initialize(...).
1428   void initialize(Attributor &A) override {
1429     AANoFreeImpl::initialize(A);
1430     Function *F = getAssociatedFunction();
1431     if (!F)
1432       indicatePessimisticFixpoint();
1433   }
1434 
1435   /// See AbstractAttribute::updateImpl(...).
1436   ChangeStatus updateImpl(Attributor &A) override {
1437     // TODO: Once we have call site specific value information we can provide
1438     //       call site specific liveness information and then it makes
1439     //       sense to specialize attributes for call sites arguments instead of
1440     //       redirecting requests to the callee argument.
1441     Function *F = getAssociatedFunction();
1442     const IRPosition &FnPos = IRPosition::function(*F);
1443     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1444     return clampStateAndIndicateChange(
1445         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1446   }
1447 
1448   /// See AbstractAttribute::trackStatistics()
1449   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1450 };
1451 
1452 /// NoFree attribute for floating values.
1453 struct AANoFreeFloating : AANoFreeImpl {
1454   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1455       : AANoFreeImpl(IRP, A) {}
1456 
1457   /// See AbstractAttribute::trackStatistics()
1458   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1459 
1460   /// See Abstract Attribute::updateImpl(...).
1461   ChangeStatus updateImpl(Attributor &A) override {
1462     const IRPosition &IRP = getIRPosition();
1463 
1464     const auto &NoFreeAA =
1465         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1466     if (NoFreeAA.isAssumedNoFree())
1467       return ChangeStatus::UNCHANGED;
1468 
1469     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1470     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1471       Instruction *UserI = cast<Instruction>(U.getUser());
1472       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1473         if (CB->isBundleOperand(&U))
1474           return false;
1475         if (!CB->isArgOperand(&U))
1476           return true;
1477         unsigned ArgNo = CB->getArgOperandNo(&U);
1478 
1479         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1480             *this, IRPosition::callsite_argument(*CB, ArgNo));
1481         return NoFreeArg.isAssumedNoFree();
1482       }
1483 
1484       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1485           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1486         Follow = true;
1487         return true;
1488       }
1489       if (isa<ReturnInst>(UserI))
1490         return true;
1491 
1492       // Unknown user.
1493       return false;
1494     };
1495     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1496       return indicatePessimisticFixpoint();
1497 
1498     return ChangeStatus::UNCHANGED;
1499   }
1500 };
1501 
1502 /// NoFree attribute for a call site argument.
1503 struct AANoFreeArgument final : AANoFreeFloating {
1504   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1505       : AANoFreeFloating(IRP, A) {}
1506 
1507   /// See AbstractAttribute::trackStatistics()
1508   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1509 };
1510 
1511 /// NoFree attribute for call site arguments.
1512 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1513   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1514       : AANoFreeFloating(IRP, A) {}
1515 
1516   /// See AbstractAttribute::updateImpl(...).
1517   ChangeStatus updateImpl(Attributor &A) override {
1518     // TODO: Once we have call site specific value information we can provide
1519     //       call site specific liveness information and then it makes
1520     //       sense to specialize attributes for call sites arguments instead of
1521     //       redirecting requests to the callee argument.
1522     Argument *Arg = getAssociatedArgument();
1523     if (!Arg)
1524       return indicatePessimisticFixpoint();
1525     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1526     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1527     return clampStateAndIndicateChange(
1528         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1529   }
1530 
1531   /// See AbstractAttribute::trackStatistics()
1532   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1533 };
1534 
1535 /// NoFree attribute for function return value.
1536 struct AANoFreeReturned final : AANoFreeFloating {
1537   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1538       : AANoFreeFloating(IRP, A) {
1539     llvm_unreachable("NoFree is not applicable to function returns!");
1540   }
1541 
1542   /// See AbstractAttribute::initialize(...).
1543   void initialize(Attributor &A) override {
1544     llvm_unreachable("NoFree is not applicable to function returns!");
1545   }
1546 
1547   /// See AbstractAttribute::updateImpl(...).
1548   ChangeStatus updateImpl(Attributor &A) override {
1549     llvm_unreachable("NoFree is not applicable to function returns!");
1550   }
1551 
1552   /// See AbstractAttribute::trackStatistics()
1553   void trackStatistics() const override {}
1554 };
1555 
1556 /// NoFree attribute deduction for a call site return value.
1557 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1558   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1559       : AANoFreeFloating(IRP, A) {}
1560 
1561   ChangeStatus manifest(Attributor &A) override {
1562     return ChangeStatus::UNCHANGED;
1563   }
1564   /// See AbstractAttribute::trackStatistics()
1565   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1566 };
1567 
1568 /// ------------------------ NonNull Argument Attribute ------------------------
1569 static int64_t getKnownNonNullAndDerefBytesForUse(
1570     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1571     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1572   TrackUse = false;
1573 
1574   const Value *UseV = U->get();
1575   if (!UseV->getType()->isPointerTy())
1576     return 0;
1577 
1578   Type *PtrTy = UseV->getType();
1579   const Function *F = I->getFunction();
1580   bool NullPointerIsDefined =
1581       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1582   const DataLayout &DL = A.getInfoCache().getDL();
1583   if (const auto *CB = dyn_cast<CallBase>(I)) {
1584     if (CB->isBundleOperand(U))
1585       return 0;
1586 
1587     if (CB->isCallee(U)) {
1588       IsNonNull |= !NullPointerIsDefined;
1589       return 0;
1590     }
1591 
1592     unsigned ArgNo = CB->getArgOperandNo(U);
1593     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1594     // As long as we only use known information there is no need to track
1595     // dependences here.
1596     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1597                                                   /* TrackDependence */ false);
1598     IsNonNull |= DerefAA.isKnownNonNull();
1599     return DerefAA.getKnownDereferenceableBytes();
1600   }
1601 
1602   // We need to follow common pointer manipulation uses to the accesses they
1603   // feed into. We can try to be smart to avoid looking through things we do not
1604   // like for now, e.g., non-inbounds GEPs.
1605   if (isa<CastInst>(I)) {
1606     TrackUse = true;
1607     return 0;
1608   }
1609   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1610     if (GEP->hasAllConstantIndices()) {
1611       TrackUse = true;
1612       return 0;
1613     }
1614 
1615   int64_t Offset;
1616   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1617     if (Base == &AssociatedValue &&
1618         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1619       int64_t DerefBytes =
1620           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1621 
1622       IsNonNull |= !NullPointerIsDefined;
1623       return std::max(int64_t(0), DerefBytes);
1624     }
1625   }
1626 
1627   /// Corner case when an offset is 0.
1628   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1629           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1630     if (Offset == 0 && Base == &AssociatedValue &&
1631         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1632       int64_t DerefBytes =
1633           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1634       IsNonNull |= !NullPointerIsDefined;
1635       return std::max(int64_t(0), DerefBytes);
1636     }
1637   }
1638 
1639   return 0;
1640 }
1641 
1642 struct AANonNullImpl : AANonNull {
1643   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1644       : AANonNull(IRP, A),
1645         NullIsDefined(NullPointerIsDefined(
1646             getAnchorScope(),
1647             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1648 
1649   /// See AbstractAttribute::initialize(...).
1650   void initialize(Attributor &A) override {
1651     if (!NullIsDefined &&
1652         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1653                 /* IgnoreSubsumingPositions */ false, &A))
1654       indicateOptimisticFixpoint();
1655     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1656       indicatePessimisticFixpoint();
1657     else
1658       AANonNull::initialize(A);
1659   }
1660 
1661   /// See AAFromMustBeExecutedContext
1662   bool followUse(Attributor &A, const Use *U, const Instruction *I,
1663                  AANonNull::StateType &State) {
1664     bool IsNonNull = false;
1665     bool TrackUse = false;
1666     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1667                                        IsNonNull, TrackUse);
1668     State.setKnown(IsNonNull);
1669     return TrackUse;
1670   }
1671 
1672   /// See AbstractAttribute::getAsStr().
1673   const std::string getAsStr() const override {
1674     return getAssumed() ? "nonnull" : "may-null";
1675   }
1676 
1677   /// Flag to determine if the underlying value can be null and still allow
1678   /// valid accesses.
1679   const bool NullIsDefined;
1680 };
1681 
1682 /// NonNull attribute for a floating value.
1683 struct AANonNullFloating
1684     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1685   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1686   AANonNullFloating(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
1687 
1688   /// See AbstractAttribute::updateImpl(...).
1689   ChangeStatus updateImpl(Attributor &A) override {
1690     ChangeStatus Change = Base::updateImpl(A);
1691     if (isKnownNonNull())
1692       return Change;
1693 
1694     if (!NullIsDefined) {
1695       const auto &DerefAA =
1696           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1697       if (DerefAA.getAssumedDereferenceableBytes())
1698         return Change;
1699     }
1700 
1701     const DataLayout &DL = A.getDataLayout();
1702 
1703     DominatorTree *DT = nullptr;
1704     AssumptionCache *AC = nullptr;
1705     InformationCache &InfoCache = A.getInfoCache();
1706     if (const Function *Fn = getAnchorScope()) {
1707       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1708       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1709     }
1710 
1711     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1712                             AANonNull::StateType &T, bool Stripped) -> bool {
1713       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1714       if (!Stripped && this == &AA) {
1715         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1716           T.indicatePessimisticFixpoint();
1717       } else {
1718         // Use abstract attribute information.
1719         const AANonNull::StateType &NS =
1720             static_cast<const AANonNull::StateType &>(AA.getState());
1721         T ^= NS;
1722       }
1723       return T.isValidState();
1724     };
1725 
1726     StateType T;
1727     if (!genericValueTraversal<AANonNull, StateType>(
1728             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1729       return indicatePessimisticFixpoint();
1730 
1731     return clampStateAndIndicateChange(getState(), T);
1732   }
1733 
1734   /// See AbstractAttribute::trackStatistics()
1735   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1736 };
1737 
1738 /// NonNull attribute for function return value.
1739 struct AANonNullReturned final
1740     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1741   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1742       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1743 
1744   /// See AbstractAttribute::trackStatistics()
1745   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1746 };
1747 
1748 /// NonNull attribute for function argument.
1749 struct AANonNullArgument final
1750     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1751                                                               AANonNullImpl> {
1752   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1753       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1754                                                                 AANonNullImpl>(
1755             IRP, A) {}
1756 
1757   /// See AbstractAttribute::trackStatistics()
1758   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1759 };
1760 
1761 struct AANonNullCallSiteArgument final : AANonNullFloating {
1762   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1763       : AANonNullFloating(IRP, A) {}
1764 
1765   /// See AbstractAttribute::trackStatistics()
1766   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1767 };
1768 
1769 /// NonNull attribute for a call site return position.
1770 struct AANonNullCallSiteReturned final
1771     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1772                                                              AANonNullImpl> {
1773   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1774       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1775                                                                AANonNullImpl>(
1776             IRP, A) {}
1777 
1778   /// See AbstractAttribute::trackStatistics()
1779   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1780 };
1781 
1782 /// ------------------------ No-Recurse Attributes ----------------------------
1783 
1784 struct AANoRecurseImpl : public AANoRecurse {
1785   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1786 
1787   /// See AbstractAttribute::getAsStr()
1788   const std::string getAsStr() const override {
1789     return getAssumed() ? "norecurse" : "may-recurse";
1790   }
1791 };
1792 
1793 struct AANoRecurseFunction final : AANoRecurseImpl {
1794   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1795       : AANoRecurseImpl(IRP, A) {}
1796 
1797   /// See AbstractAttribute::initialize(...).
1798   void initialize(Attributor &A) override {
1799     AANoRecurseImpl::initialize(A);
1800     if (const Function *F = getAnchorScope())
1801       if (A.getInfoCache().getSccSize(*F) != 1)
1802         indicatePessimisticFixpoint();
1803   }
1804 
1805   /// See AbstractAttribute::updateImpl(...).
1806   ChangeStatus updateImpl(Attributor &A) override {
1807 
1808     // If all live call sites are known to be no-recurse, we are as well.
1809     auto CallSitePred = [&](AbstractCallSite ACS) {
1810       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1811           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1812           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1813       return NoRecurseAA.isKnownNoRecurse();
1814     };
1815     bool AllCallSitesKnown;
1816     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1817       // If we know all call sites and all are known no-recurse, we are done.
1818       // If all known call sites, which might not be all that exist, are known
1819       // to be no-recurse, we are not done but we can continue to assume
1820       // no-recurse. If one of the call sites we have not visited will become
1821       // live, another update is triggered.
1822       if (AllCallSitesKnown)
1823         indicateOptimisticFixpoint();
1824       return ChangeStatus::UNCHANGED;
1825     }
1826 
1827     // If the above check does not hold anymore we look at the calls.
1828     auto CheckForNoRecurse = [&](Instruction &I) {
1829       const auto &CB = cast<CallBase>(I);
1830       if (CB.hasFnAttr(Attribute::NoRecurse))
1831         return true;
1832 
1833       const auto &NoRecurseAA =
1834           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1835       if (!NoRecurseAA.isAssumedNoRecurse())
1836         return false;
1837 
1838       // Recursion to the same function
1839       if (CB.getCalledFunction() == getAnchorScope())
1840         return false;
1841 
1842       return true;
1843     };
1844 
1845     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1846       return indicatePessimisticFixpoint();
1847     return ChangeStatus::UNCHANGED;
1848   }
1849 
1850   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1851 };
1852 
1853 /// NoRecurse attribute deduction for a call sites.
1854 struct AANoRecurseCallSite final : AANoRecurseImpl {
1855   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1856       : AANoRecurseImpl(IRP, A) {}
1857 
1858   /// See AbstractAttribute::initialize(...).
1859   void initialize(Attributor &A) override {
1860     AANoRecurseImpl::initialize(A);
1861     Function *F = getAssociatedFunction();
1862     if (!F)
1863       indicatePessimisticFixpoint();
1864   }
1865 
1866   /// See AbstractAttribute::updateImpl(...).
1867   ChangeStatus updateImpl(Attributor &A) override {
1868     // TODO: Once we have call site specific value information we can provide
1869     //       call site specific liveness information and then it makes
1870     //       sense to specialize attributes for call sites arguments instead of
1871     //       redirecting requests to the callee argument.
1872     Function *F = getAssociatedFunction();
1873     const IRPosition &FnPos = IRPosition::function(*F);
1874     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1875     return clampStateAndIndicateChange(
1876         getState(),
1877         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1878   }
1879 
1880   /// See AbstractAttribute::trackStatistics()
1881   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1882 };
1883 
1884 /// -------------------- Undefined-Behavior Attributes ------------------------
1885 
1886 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1887   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1888       : AAUndefinedBehavior(IRP, A) {}
1889 
1890   /// See AbstractAttribute::updateImpl(...).
1891   // through a pointer (i.e. also branches etc.)
1892   ChangeStatus updateImpl(Attributor &A) override {
1893     const size_t UBPrevSize = KnownUBInsts.size();
1894     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1895 
1896     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1897       // Skip instructions that are already saved.
1898       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1899         return true;
1900 
1901       // If we reach here, we know we have an instruction
1902       // that accesses memory through a pointer operand,
1903       // for which getPointerOperand() should give it to us.
1904       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1905       assert(PtrOp &&
1906              "Expected pointer operand of memory accessing instruction");
1907 
1908       // Either we stopped and the appropriate action was taken,
1909       // or we got back a simplified value to continue.
1910       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1911       if (!SimplifiedPtrOp.hasValue())
1912         return true;
1913       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1914 
1915       // A memory access through a pointer is considered UB
1916       // only if the pointer has constant null value.
1917       // TODO: Expand it to not only check constant values.
1918       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1919         AssumedNoUBInsts.insert(&I);
1920         return true;
1921       }
1922       const Type *PtrTy = PtrOpVal->getType();
1923 
1924       // Because we only consider instructions inside functions,
1925       // assume that a parent function exists.
1926       const Function *F = I.getFunction();
1927 
1928       // A memory access using constant null pointer is only considered UB
1929       // if null pointer is _not_ defined for the target platform.
1930       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1931         AssumedNoUBInsts.insert(&I);
1932       else
1933         KnownUBInsts.insert(&I);
1934       return true;
1935     };
1936 
1937     auto InspectBrInstForUB = [&](Instruction &I) {
1938       // A conditional branch instruction is considered UB if it has `undef`
1939       // condition.
1940 
1941       // Skip instructions that are already saved.
1942       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1943         return true;
1944 
1945       // We know we have a branch instruction.
1946       auto BrInst = cast<BranchInst>(&I);
1947 
1948       // Unconditional branches are never considered UB.
1949       if (BrInst->isUnconditional())
1950         return true;
1951 
1952       // Either we stopped and the appropriate action was taken,
1953       // or we got back a simplified value to continue.
1954       Optional<Value *> SimplifiedCond =
1955           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1956       if (!SimplifiedCond.hasValue())
1957         return true;
1958       AssumedNoUBInsts.insert(&I);
1959       return true;
1960     };
1961 
1962     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1963                               {Instruction::Load, Instruction::Store,
1964                                Instruction::AtomicCmpXchg,
1965                                Instruction::AtomicRMW},
1966                               /* CheckBBLivenessOnly */ true);
1967     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1968                               /* CheckBBLivenessOnly */ true);
1969     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1970         UBPrevSize != KnownUBInsts.size())
1971       return ChangeStatus::CHANGED;
1972     return ChangeStatus::UNCHANGED;
1973   }
1974 
1975   bool isKnownToCauseUB(Instruction *I) const override {
1976     return KnownUBInsts.count(I);
1977   }
1978 
1979   bool isAssumedToCauseUB(Instruction *I) const override {
1980     // In simple words, if an instruction is not in the assumed to _not_
1981     // cause UB, then it is assumed UB (that includes those
1982     // in the KnownUBInsts set). The rest is boilerplate
1983     // is to ensure that it is one of the instructions we test
1984     // for UB.
1985 
1986     switch (I->getOpcode()) {
1987     case Instruction::Load:
1988     case Instruction::Store:
1989     case Instruction::AtomicCmpXchg:
1990     case Instruction::AtomicRMW:
1991       return !AssumedNoUBInsts.count(I);
1992     case Instruction::Br: {
1993       auto BrInst = cast<BranchInst>(I);
1994       if (BrInst->isUnconditional())
1995         return false;
1996       return !AssumedNoUBInsts.count(I);
1997     } break;
1998     default:
1999       return false;
2000     }
2001     return false;
2002   }
2003 
2004   ChangeStatus manifest(Attributor &A) override {
2005     if (KnownUBInsts.empty())
2006       return ChangeStatus::UNCHANGED;
2007     for (Instruction *I : KnownUBInsts)
2008       A.changeToUnreachableAfterManifest(I);
2009     return ChangeStatus::CHANGED;
2010   }
2011 
2012   /// See AbstractAttribute::getAsStr()
2013   const std::string getAsStr() const override {
2014     return getAssumed() ? "undefined-behavior" : "no-ub";
2015   }
2016 
2017   /// Note: The correctness of this analysis depends on the fact that the
2018   /// following 2 sets will stop changing after some point.
2019   /// "Change" here means that their size changes.
2020   /// The size of each set is monotonically increasing
2021   /// (we only add items to them) and it is upper bounded by the number of
2022   /// instructions in the processed function (we can never save more
2023   /// elements in either set than this number). Hence, at some point,
2024   /// they will stop increasing.
2025   /// Consequently, at some point, both sets will have stopped
2026   /// changing, effectively making the analysis reach a fixpoint.
2027 
2028   /// Note: These 2 sets are disjoint and an instruction can be considered
2029   /// one of 3 things:
2030   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2031   ///    the KnownUBInsts set.
2032   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2033   ///    has a reason to assume it).
2034   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2035   ///    could not find a reason to assume or prove that it can cause UB,
2036   ///    hence it assumes it doesn't. We have a set for these instructions
2037   ///    so that we don't reprocess them in every update.
2038   ///    Note however that instructions in this set may cause UB.
2039 
2040 protected:
2041   /// A set of all live instructions _known_ to cause UB.
2042   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2043 
2044 private:
2045   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2046   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2047 
2048   // Should be called on updates in which if we're processing an instruction
2049   // \p I that depends on a value \p V, one of the following has to happen:
2050   // - If the value is assumed, then stop.
2051   // - If the value is known but undef, then consider it UB.
2052   // - Otherwise, do specific processing with the simplified value.
2053   // We return None in the first 2 cases to signify that an appropriate
2054   // action was taken and the caller should stop.
2055   // Otherwise, we return the simplified value that the caller should
2056   // use for specific processing.
2057   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2058                                          Instruction *I) {
2059     const auto &ValueSimplifyAA =
2060         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2061     Optional<Value *> SimplifiedV =
2062         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2063     if (!ValueSimplifyAA.isKnown()) {
2064       // Don't depend on assumed values.
2065       return llvm::None;
2066     }
2067     if (!SimplifiedV.hasValue()) {
2068       // If it is known (which we tested above) but it doesn't have a value,
2069       // then we can assume `undef` and hence the instruction is UB.
2070       KnownUBInsts.insert(I);
2071       return llvm::None;
2072     }
2073     Value *Val = SimplifiedV.getValue();
2074     if (isa<UndefValue>(Val)) {
2075       KnownUBInsts.insert(I);
2076       return llvm::None;
2077     }
2078     return Val;
2079   }
2080 };
2081 
2082 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2083   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2084       : AAUndefinedBehaviorImpl(IRP, A) {}
2085 
2086   /// See AbstractAttribute::trackStatistics()
2087   void trackStatistics() const override {
2088     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2089                "Number of instructions known to have UB");
2090     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2091         KnownUBInsts.size();
2092   }
2093 };
2094 
2095 /// ------------------------ Will-Return Attributes ----------------------------
2096 
2097 // Helper function that checks whether a function has any cycle which we don't
2098 // know if it is bounded or not.
2099 // Loops with maximum trip count are considered bounded, any other cycle not.
2100 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2101   ScalarEvolution *SE =
2102       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2103   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2104   // If either SCEV or LoopInfo is not available for the function then we assume
2105   // any cycle to be unbounded cycle.
2106   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2107   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2108   if (!SE || !LI) {
2109     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2110       if (SCCI.hasCycle())
2111         return true;
2112     return false;
2113   }
2114 
2115   // If there's irreducible control, the function may contain non-loop cycles.
2116   if (mayContainIrreducibleControl(F, LI))
2117     return true;
2118 
2119   // Any loop that does not have a max trip count is considered unbounded cycle.
2120   for (auto *L : LI->getLoopsInPreorder()) {
2121     if (!SE->getSmallConstantMaxTripCount(L))
2122       return true;
2123   }
2124   return false;
2125 }
2126 
2127 struct AAWillReturnImpl : public AAWillReturn {
2128   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2129       : AAWillReturn(IRP, A) {}
2130 
2131   /// See AbstractAttribute::initialize(...).
2132   void initialize(Attributor &A) override {
2133     AAWillReturn::initialize(A);
2134 
2135     Function *F = getAnchorScope();
2136     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2137       indicatePessimisticFixpoint();
2138   }
2139 
2140   /// See AbstractAttribute::updateImpl(...).
2141   ChangeStatus updateImpl(Attributor &A) override {
2142     auto CheckForWillReturn = [&](Instruction &I) {
2143       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2144       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2145       if (WillReturnAA.isKnownWillReturn())
2146         return true;
2147       if (!WillReturnAA.isAssumedWillReturn())
2148         return false;
2149       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2150       return NoRecurseAA.isAssumedNoRecurse();
2151     };
2152 
2153     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2154       return indicatePessimisticFixpoint();
2155 
2156     return ChangeStatus::UNCHANGED;
2157   }
2158 
2159   /// See AbstractAttribute::getAsStr()
2160   const std::string getAsStr() const override {
2161     return getAssumed() ? "willreturn" : "may-noreturn";
2162   }
2163 };
2164 
2165 struct AAWillReturnFunction final : AAWillReturnImpl {
2166   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2167       : AAWillReturnImpl(IRP, A) {}
2168 
2169   /// See AbstractAttribute::trackStatistics()
2170   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2171 };
2172 
2173 /// WillReturn attribute deduction for a call sites.
2174 struct AAWillReturnCallSite final : AAWillReturnImpl {
2175   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2176       : AAWillReturnImpl(IRP, A) {}
2177 
2178   /// See AbstractAttribute::initialize(...).
2179   void initialize(Attributor &A) override {
2180     AAWillReturnImpl::initialize(A);
2181     Function *F = getAssociatedFunction();
2182     if (!F)
2183       indicatePessimisticFixpoint();
2184   }
2185 
2186   /// See AbstractAttribute::updateImpl(...).
2187   ChangeStatus updateImpl(Attributor &A) override {
2188     // TODO: Once we have call site specific value information we can provide
2189     //       call site specific liveness information and then it makes
2190     //       sense to specialize attributes for call sites arguments instead of
2191     //       redirecting requests to the callee argument.
2192     Function *F = getAssociatedFunction();
2193     const IRPosition &FnPos = IRPosition::function(*F);
2194     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2195     return clampStateAndIndicateChange(
2196         getState(),
2197         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2198   }
2199 
2200   /// See AbstractAttribute::trackStatistics()
2201   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2202 };
2203 
2204 /// -------------------AAReachability Attribute--------------------------
2205 
2206 struct AAReachabilityImpl : AAReachability {
2207   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2208       : AAReachability(IRP, A) {}
2209 
2210   const std::string getAsStr() const override {
2211     // TODO: Return the number of reachable queries.
2212     return "reachable";
2213   }
2214 
2215   /// See AbstractAttribute::initialize(...).
2216   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2217 
2218   /// See AbstractAttribute::updateImpl(...).
2219   ChangeStatus updateImpl(Attributor &A) override {
2220     return indicatePessimisticFixpoint();
2221   }
2222 };
2223 
2224 struct AAReachabilityFunction final : public AAReachabilityImpl {
2225   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2226       : AAReachabilityImpl(IRP, A) {}
2227 
2228   /// See AbstractAttribute::trackStatistics()
2229   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2230 };
2231 
2232 /// ------------------------ NoAlias Argument Attribute ------------------------
2233 
2234 struct AANoAliasImpl : AANoAlias {
2235   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2236     assert(getAssociatedType()->isPointerTy() &&
2237            "Noalias is a pointer attribute");
2238   }
2239 
2240   const std::string getAsStr() const override {
2241     return getAssumed() ? "noalias" : "may-alias";
2242   }
2243 };
2244 
2245 /// NoAlias attribute for a floating value.
2246 struct AANoAliasFloating final : AANoAliasImpl {
2247   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2248       : AANoAliasImpl(IRP, A) {}
2249 
2250   /// See AbstractAttribute::initialize(...).
2251   void initialize(Attributor &A) override {
2252     AANoAliasImpl::initialize(A);
2253     Value *Val = &getAssociatedValue();
2254     do {
2255       CastInst *CI = dyn_cast<CastInst>(Val);
2256       if (!CI)
2257         break;
2258       Value *Base = CI->getOperand(0);
2259       if (Base->getNumUses() != 1)
2260         break;
2261       Val = Base;
2262     } while (true);
2263 
2264     if (!Val->getType()->isPointerTy()) {
2265       indicatePessimisticFixpoint();
2266       return;
2267     }
2268 
2269     if (isa<AllocaInst>(Val))
2270       indicateOptimisticFixpoint();
2271     else if (isa<ConstantPointerNull>(Val) &&
2272              !NullPointerIsDefined(getAnchorScope(),
2273                                    Val->getType()->getPointerAddressSpace()))
2274       indicateOptimisticFixpoint();
2275     else if (Val != &getAssociatedValue()) {
2276       const auto &ValNoAliasAA =
2277           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2278       if (ValNoAliasAA.isKnownNoAlias())
2279         indicateOptimisticFixpoint();
2280     }
2281   }
2282 
2283   /// See AbstractAttribute::updateImpl(...).
2284   ChangeStatus updateImpl(Attributor &A) override {
2285     // TODO: Implement this.
2286     return indicatePessimisticFixpoint();
2287   }
2288 
2289   /// See AbstractAttribute::trackStatistics()
2290   void trackStatistics() const override {
2291     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2292   }
2293 };
2294 
2295 /// NoAlias attribute for an argument.
2296 struct AANoAliasArgument final
2297     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2298   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2299   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2300 
2301   /// See AbstractAttribute::initialize(...).
2302   void initialize(Attributor &A) override {
2303     Base::initialize(A);
2304     // See callsite argument attribute and callee argument attribute.
2305     if (hasAttr({Attribute::ByVal}))
2306       indicateOptimisticFixpoint();
2307   }
2308 
2309   /// See AbstractAttribute::update(...).
2310   ChangeStatus updateImpl(Attributor &A) override {
2311     // We have to make sure no-alias on the argument does not break
2312     // synchronization when this is a callback argument, see also [1] below.
2313     // If synchronization cannot be affected, we delegate to the base updateImpl
2314     // function, otherwise we give up for now.
2315 
2316     // If the function is no-sync, no-alias cannot break synchronization.
2317     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2318         *this, IRPosition::function_scope(getIRPosition()));
2319     if (NoSyncAA.isAssumedNoSync())
2320       return Base::updateImpl(A);
2321 
2322     // If the argument is read-only, no-alias cannot break synchronization.
2323     const auto &MemBehaviorAA =
2324         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2325     if (MemBehaviorAA.isAssumedReadOnly())
2326       return Base::updateImpl(A);
2327 
2328     // If the argument is never passed through callbacks, no-alias cannot break
2329     // synchronization.
2330     bool AllCallSitesKnown;
2331     if (A.checkForAllCallSites(
2332             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2333             true, AllCallSitesKnown))
2334       return Base::updateImpl(A);
2335 
2336     // TODO: add no-alias but make sure it doesn't break synchronization by
2337     // introducing fake uses. See:
2338     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2339     //     International Workshop on OpenMP 2018,
2340     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2341 
2342     return indicatePessimisticFixpoint();
2343   }
2344 
2345   /// See AbstractAttribute::trackStatistics()
2346   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2347 };
2348 
2349 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2350   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2351       : AANoAliasImpl(IRP, A) {}
2352 
2353   /// See AbstractAttribute::initialize(...).
2354   void initialize(Attributor &A) override {
2355     // See callsite argument attribute and callee argument attribute.
2356     const auto &CB = cast<CallBase>(getAnchorValue());
2357     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2358       indicateOptimisticFixpoint();
2359     Value &Val = getAssociatedValue();
2360     if (isa<ConstantPointerNull>(Val) &&
2361         !NullPointerIsDefined(getAnchorScope(),
2362                               Val.getType()->getPointerAddressSpace()))
2363       indicateOptimisticFixpoint();
2364   }
2365 
2366   /// Determine if the underlying value may alias with the call site argument
2367   /// \p OtherArgNo of \p ICS (= the underlying call site).
2368   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2369                             const AAMemoryBehavior &MemBehaviorAA,
2370                             const CallBase &CB, unsigned OtherArgNo) {
2371     // We do not need to worry about aliasing with the underlying IRP.
2372     if (this->getArgNo() == (int)OtherArgNo)
2373       return false;
2374 
2375     // If it is not a pointer or pointer vector we do not alias.
2376     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2377     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2378       return false;
2379 
2380     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2381         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2382         /* TrackDependence */ false);
2383 
2384     // If the argument is readnone, there is no read-write aliasing.
2385     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2386       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2387       return false;
2388     }
2389 
2390     // If the argument is readonly and the underlying value is readonly, there
2391     // is no read-write aliasing.
2392     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2393     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2394       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2395       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2396       return false;
2397     }
2398 
2399     // We have to utilize actual alias analysis queries so we need the object.
2400     if (!AAR)
2401       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2402 
2403     // Try to rule it out at the call site.
2404     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2405     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2406                          "callsite arguments: "
2407                       << getAssociatedValue() << " " << *ArgOp << " => "
2408                       << (IsAliasing ? "" : "no-") << "alias \n");
2409 
2410     return IsAliasing;
2411   }
2412 
2413   bool
2414   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2415                                          const AAMemoryBehavior &MemBehaviorAA,
2416                                          const AANoAlias &NoAliasAA) {
2417     // We can deduce "noalias" if the following conditions hold.
2418     // (i)   Associated value is assumed to be noalias in the definition.
2419     // (ii)  Associated value is assumed to be no-capture in all the uses
2420     //       possibly executed before this callsite.
2421     // (iii) There is no other pointer argument which could alias with the
2422     //       value.
2423 
2424     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2425     if (!AssociatedValueIsNoAliasAtDef) {
2426       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2427                         << " is not no-alias at the definition\n");
2428       return false;
2429     }
2430 
2431     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2432 
2433     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2434     auto &NoCaptureAA =
2435         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2436     // Check whether the value is captured in the scope using AANoCapture.
2437     //      Look at CFG and check only uses possibly executed before this
2438     //      callsite.
2439     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2440       Instruction *UserI = cast<Instruction>(U.getUser());
2441 
2442       // If user if curr instr and only use.
2443       if ((UserI == getCtxI()) && (UserI->getNumUses() == 1))
2444         return true;
2445 
2446       const Function *ScopeFn = VIRP.getAnchorScope();
2447       if (ScopeFn) {
2448         const auto &ReachabilityAA =
2449             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2450 
2451         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2452           return true;
2453 
2454         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2455           if (CB->isArgOperand(&U)) {
2456 
2457             unsigned ArgNo = CB->getArgOperandNo(&U);
2458 
2459             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2460                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2461 
2462             if (NoCaptureAA.isAssumedNoCapture())
2463               return true;
2464           }
2465         }
2466       }
2467 
2468       // For cases which can potentially have more users
2469       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2470           isa<SelectInst>(U)) {
2471         Follow = true;
2472         return true;
2473       }
2474 
2475       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2476       return false;
2477     };
2478 
2479     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2480       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2481         LLVM_DEBUG(
2482             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2483                    << " cannot be noalias as it is potentially captured\n");
2484         return false;
2485       }
2486     }
2487     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2488 
2489     // Check there is no other pointer argument which could alias with the
2490     // value passed at this call site.
2491     // TODO: AbstractCallSite
2492     const auto &CB = cast<CallBase>(getAnchorValue());
2493     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2494          OtherArgNo++)
2495       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2496         return false;
2497 
2498     return true;
2499   }
2500 
2501   /// See AbstractAttribute::updateImpl(...).
2502   ChangeStatus updateImpl(Attributor &A) override {
2503     // If the argument is readnone we are done as there are no accesses via the
2504     // argument.
2505     auto &MemBehaviorAA =
2506         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2507                                      /* TrackDependence */ false);
2508     if (MemBehaviorAA.isAssumedReadNone()) {
2509       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2510       return ChangeStatus::UNCHANGED;
2511     }
2512 
2513     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2514     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2515                                                   /* TrackDependence */ false);
2516 
2517     AAResults *AAR = nullptr;
2518     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2519                                                NoAliasAA)) {
2520       LLVM_DEBUG(
2521           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2522       return ChangeStatus::UNCHANGED;
2523     }
2524 
2525     return indicatePessimisticFixpoint();
2526   }
2527 
2528   /// See AbstractAttribute::trackStatistics()
2529   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2530 };
2531 
2532 /// NoAlias attribute for function return value.
2533 struct AANoAliasReturned final : AANoAliasImpl {
2534   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2535       : AANoAliasImpl(IRP, A) {}
2536 
2537   /// See AbstractAttribute::updateImpl(...).
2538   virtual ChangeStatus updateImpl(Attributor &A) override {
2539 
2540     auto CheckReturnValue = [&](Value &RV) -> bool {
2541       if (Constant *C = dyn_cast<Constant>(&RV))
2542         if (C->isNullValue() || isa<UndefValue>(C))
2543           return true;
2544 
2545       /// For now, we can only deduce noalias if we have call sites.
2546       /// FIXME: add more support.
2547       if (!isa<CallBase>(&RV))
2548         return false;
2549 
2550       const IRPosition &RVPos = IRPosition::value(RV);
2551       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2552       if (!NoAliasAA.isAssumedNoAlias())
2553         return false;
2554 
2555       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2556       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2557     };
2558 
2559     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2560       return indicatePessimisticFixpoint();
2561 
2562     return ChangeStatus::UNCHANGED;
2563   }
2564 
2565   /// See AbstractAttribute::trackStatistics()
2566   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2567 };
2568 
2569 /// NoAlias attribute deduction for a call site return value.
2570 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2571   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2572       : AANoAliasImpl(IRP, A) {}
2573 
2574   /// See AbstractAttribute::initialize(...).
2575   void initialize(Attributor &A) override {
2576     AANoAliasImpl::initialize(A);
2577     Function *F = getAssociatedFunction();
2578     if (!F)
2579       indicatePessimisticFixpoint();
2580   }
2581 
2582   /// See AbstractAttribute::updateImpl(...).
2583   ChangeStatus updateImpl(Attributor &A) override {
2584     // TODO: Once we have call site specific value information we can provide
2585     //       call site specific liveness information and then it makes
2586     //       sense to specialize attributes for call sites arguments instead of
2587     //       redirecting requests to the callee argument.
2588     Function *F = getAssociatedFunction();
2589     const IRPosition &FnPos = IRPosition::returned(*F);
2590     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2591     return clampStateAndIndicateChange(
2592         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2593   }
2594 
2595   /// See AbstractAttribute::trackStatistics()
2596   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2597 };
2598 
2599 /// -------------------AAIsDead Function Attribute-----------------------
2600 
2601 struct AAIsDeadValueImpl : public AAIsDead {
2602   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2603 
2604   /// See AAIsDead::isAssumedDead().
2605   bool isAssumedDead() const override { return getAssumed(); }
2606 
2607   /// See AAIsDead::isKnownDead().
2608   bool isKnownDead() const override { return getKnown(); }
2609 
2610   /// See AAIsDead::isAssumedDead(BasicBlock *).
2611   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2612 
2613   /// See AAIsDead::isKnownDead(BasicBlock *).
2614   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2615 
2616   /// See AAIsDead::isAssumedDead(Instruction *I).
2617   bool isAssumedDead(const Instruction *I) const override {
2618     return I == getCtxI() && isAssumedDead();
2619   }
2620 
2621   /// See AAIsDead::isKnownDead(Instruction *I).
2622   bool isKnownDead(const Instruction *I) const override {
2623     return isAssumedDead(I) && getKnown();
2624   }
2625 
2626   /// See AbstractAttribute::getAsStr().
2627   const std::string getAsStr() const override {
2628     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2629   }
2630 
2631   /// Check if all uses are assumed dead.
2632   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2633     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2634     // Explicitly set the dependence class to required because we want a long
2635     // chain of N dependent instructions to be considered live as soon as one is
2636     // without going through N update cycles. This is not required for
2637     // correctness.
2638     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2639   }
2640 
2641   /// Determine if \p I is assumed to be side-effect free.
2642   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2643     if (!I || wouldInstructionBeTriviallyDead(I))
2644       return true;
2645 
2646     auto *CB = dyn_cast<CallBase>(I);
2647     if (!CB || isa<IntrinsicInst>(CB))
2648       return false;
2649 
2650     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2651     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2652     if (!NoUnwindAA.isAssumedNoUnwind())
2653       return false;
2654 
2655     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2656     if (!MemBehaviorAA.isAssumedReadOnly())
2657       return false;
2658 
2659     return true;
2660   }
2661 };
2662 
2663 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2664   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2665       : AAIsDeadValueImpl(IRP, A) {}
2666 
2667   /// See AbstractAttribute::initialize(...).
2668   void initialize(Attributor &A) override {
2669     if (isa<UndefValue>(getAssociatedValue())) {
2670       indicatePessimisticFixpoint();
2671       return;
2672     }
2673 
2674     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2675     if (!isAssumedSideEffectFree(A, I))
2676       indicatePessimisticFixpoint();
2677   }
2678 
2679   /// See AbstractAttribute::updateImpl(...).
2680   ChangeStatus updateImpl(Attributor &A) override {
2681     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2682     if (!isAssumedSideEffectFree(A, I))
2683       return indicatePessimisticFixpoint();
2684 
2685     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2686       return indicatePessimisticFixpoint();
2687     return ChangeStatus::UNCHANGED;
2688   }
2689 
2690   /// See AbstractAttribute::manifest(...).
2691   ChangeStatus manifest(Attributor &A) override {
2692     Value &V = getAssociatedValue();
2693     if (auto *I = dyn_cast<Instruction>(&V)) {
2694       // If we get here we basically know the users are all dead. We check if
2695       // isAssumedSideEffectFree returns true here again because it might not be
2696       // the case and only the users are dead but the instruction (=call) is
2697       // still needed.
2698       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2699         A.deleteAfterManifest(*I);
2700         return ChangeStatus::CHANGED;
2701       }
2702     }
2703     if (V.use_empty())
2704       return ChangeStatus::UNCHANGED;
2705 
2706     bool UsedAssumedInformation = false;
2707     Optional<Constant *> C =
2708         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2709     if (C.hasValue() && C.getValue())
2710       return ChangeStatus::UNCHANGED;
2711 
2712     // Replace the value with undef as it is dead but keep droppable uses around
2713     // as they provide information we don't want to give up on just yet.
2714     UndefValue &UV = *UndefValue::get(V.getType());
2715     bool AnyChange =
2716         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2717     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2718   }
2719 
2720   /// See AbstractAttribute::trackStatistics()
2721   void trackStatistics() const override {
2722     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2723   }
2724 };
2725 
2726 struct AAIsDeadArgument : public AAIsDeadFloating {
2727   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2728       : AAIsDeadFloating(IRP, A) {}
2729 
2730   /// See AbstractAttribute::initialize(...).
2731   void initialize(Attributor &A) override {
2732     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2733       indicatePessimisticFixpoint();
2734   }
2735 
2736   /// See AbstractAttribute::manifest(...).
2737   ChangeStatus manifest(Attributor &A) override {
2738     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2739     Argument &Arg = *getAssociatedArgument();
2740     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2741       if (A.registerFunctionSignatureRewrite(
2742               Arg, /* ReplacementTypes */ {},
2743               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2744               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2745         Arg.dropDroppableUses();
2746         return ChangeStatus::CHANGED;
2747       }
2748     return Changed;
2749   }
2750 
2751   /// See AbstractAttribute::trackStatistics()
2752   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2753 };
2754 
2755 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2756   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2757       : AAIsDeadValueImpl(IRP, A) {}
2758 
2759   /// See AbstractAttribute::initialize(...).
2760   void initialize(Attributor &A) override {
2761     if (isa<UndefValue>(getAssociatedValue()))
2762       indicatePessimisticFixpoint();
2763   }
2764 
2765   /// See AbstractAttribute::updateImpl(...).
2766   ChangeStatus updateImpl(Attributor &A) override {
2767     // TODO: Once we have call site specific value information we can provide
2768     //       call site specific liveness information and then it makes
2769     //       sense to specialize attributes for call sites arguments instead of
2770     //       redirecting requests to the callee argument.
2771     Argument *Arg = getAssociatedArgument();
2772     if (!Arg)
2773       return indicatePessimisticFixpoint();
2774     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2775     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2776     return clampStateAndIndicateChange(
2777         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2778   }
2779 
2780   /// See AbstractAttribute::manifest(...).
2781   ChangeStatus manifest(Attributor &A) override {
2782     CallBase &CB = cast<CallBase>(getAnchorValue());
2783     Use &U = CB.getArgOperandUse(getArgNo());
2784     assert(!isa<UndefValue>(U.get()) &&
2785            "Expected undef values to be filtered out!");
2786     UndefValue &UV = *UndefValue::get(U->getType());
2787     if (A.changeUseAfterManifest(U, UV))
2788       return ChangeStatus::CHANGED;
2789     return ChangeStatus::UNCHANGED;
2790   }
2791 
2792   /// See AbstractAttribute::trackStatistics()
2793   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2794 };
2795 
2796 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2797   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2798       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2799 
2800   /// See AAIsDead::isAssumedDead().
2801   bool isAssumedDead() const override {
2802     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2803   }
2804 
2805   /// See AbstractAttribute::initialize(...).
2806   void initialize(Attributor &A) override {
2807     if (isa<UndefValue>(getAssociatedValue())) {
2808       indicatePessimisticFixpoint();
2809       return;
2810     }
2811 
2812     // We track this separately as a secondary state.
2813     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2814   }
2815 
2816   /// See AbstractAttribute::updateImpl(...).
2817   ChangeStatus updateImpl(Attributor &A) override {
2818     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2819     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2820       IsAssumedSideEffectFree = false;
2821       Changed = ChangeStatus::CHANGED;
2822     }
2823 
2824     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2825       return indicatePessimisticFixpoint();
2826     return Changed;
2827   }
2828 
2829   /// See AbstractAttribute::trackStatistics()
2830   void trackStatistics() const override {
2831     if (IsAssumedSideEffectFree)
2832       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2833     else
2834       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2835   }
2836 
2837   /// See AbstractAttribute::getAsStr().
2838   const std::string getAsStr() const override {
2839     return isAssumedDead()
2840                ? "assumed-dead"
2841                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2842   }
2843 
2844 private:
2845   bool IsAssumedSideEffectFree;
2846 };
2847 
2848 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2849   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2850       : AAIsDeadValueImpl(IRP, A) {}
2851 
2852   /// See AbstractAttribute::updateImpl(...).
2853   ChangeStatus updateImpl(Attributor &A) override {
2854 
2855     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2856                               {Instruction::Ret});
2857 
2858     auto PredForCallSite = [&](AbstractCallSite ACS) {
2859       if (ACS.isCallbackCall() || !ACS.getInstruction())
2860         return false;
2861       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2862     };
2863 
2864     bool AllCallSitesKnown;
2865     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2866                                 AllCallSitesKnown))
2867       return indicatePessimisticFixpoint();
2868 
2869     return ChangeStatus::UNCHANGED;
2870   }
2871 
2872   /// See AbstractAttribute::manifest(...).
2873   ChangeStatus manifest(Attributor &A) override {
2874     // TODO: Rewrite the signature to return void?
2875     bool AnyChange = false;
2876     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2877     auto RetInstPred = [&](Instruction &I) {
2878       ReturnInst &RI = cast<ReturnInst>(I);
2879       if (!isa<UndefValue>(RI.getReturnValue()))
2880         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2881       return true;
2882     };
2883     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2884     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2885   }
2886 
2887   /// See AbstractAttribute::trackStatistics()
2888   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2889 };
2890 
2891 struct AAIsDeadFunction : public AAIsDead {
2892   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2893 
2894   /// See AbstractAttribute::initialize(...).
2895   void initialize(Attributor &A) override {
2896     const Function *F = getAnchorScope();
2897     if (F && !F->isDeclaration()) {
2898       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2899       assumeLive(A, F->getEntryBlock());
2900     }
2901   }
2902 
2903   /// See AbstractAttribute::getAsStr().
2904   const std::string getAsStr() const override {
2905     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2906            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2907            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2908            std::to_string(KnownDeadEnds.size()) + "]";
2909   }
2910 
2911   /// See AbstractAttribute::manifest(...).
2912   ChangeStatus manifest(Attributor &A) override {
2913     assert(getState().isValidState() &&
2914            "Attempted to manifest an invalid state!");
2915 
2916     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2917     Function &F = *getAnchorScope();
2918 
2919     if (AssumedLiveBlocks.empty()) {
2920       A.deleteAfterManifest(F);
2921       return ChangeStatus::CHANGED;
2922     }
2923 
2924     // Flag to determine if we can change an invoke to a call assuming the
2925     // callee is nounwind. This is not possible if the personality of the
2926     // function allows to catch asynchronous exceptions.
2927     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2928 
2929     KnownDeadEnds.set_union(ToBeExploredFrom);
2930     for (const Instruction *DeadEndI : KnownDeadEnds) {
2931       auto *CB = dyn_cast<CallBase>(DeadEndI);
2932       if (!CB)
2933         continue;
2934       const auto &NoReturnAA =
2935           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2936       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2937       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2938         continue;
2939 
2940       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2941         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2942       else
2943         A.changeToUnreachableAfterManifest(
2944             const_cast<Instruction *>(DeadEndI->getNextNode()));
2945       HasChanged = ChangeStatus::CHANGED;
2946     }
2947 
2948     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2949     for (BasicBlock &BB : F)
2950       if (!AssumedLiveBlocks.count(&BB)) {
2951         A.deleteAfterManifest(BB);
2952         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2953       }
2954 
2955     return HasChanged;
2956   }
2957 
2958   /// See AbstractAttribute::updateImpl(...).
2959   ChangeStatus updateImpl(Attributor &A) override;
2960 
2961   /// See AbstractAttribute::trackStatistics()
2962   void trackStatistics() const override {}
2963 
2964   /// Returns true if the function is assumed dead.
2965   bool isAssumedDead() const override { return false; }
2966 
2967   /// See AAIsDead::isKnownDead().
2968   bool isKnownDead() const override { return false; }
2969 
2970   /// See AAIsDead::isAssumedDead(BasicBlock *).
2971   bool isAssumedDead(const BasicBlock *BB) const override {
2972     assert(BB->getParent() == getAnchorScope() &&
2973            "BB must be in the same anchor scope function.");
2974 
2975     if (!getAssumed())
2976       return false;
2977     return !AssumedLiveBlocks.count(BB);
2978   }
2979 
2980   /// See AAIsDead::isKnownDead(BasicBlock *).
2981   bool isKnownDead(const BasicBlock *BB) const override {
2982     return getKnown() && isAssumedDead(BB);
2983   }
2984 
2985   /// See AAIsDead::isAssumed(Instruction *I).
2986   bool isAssumedDead(const Instruction *I) const override {
2987     assert(I->getParent()->getParent() == getAnchorScope() &&
2988            "Instruction must be in the same anchor scope function.");
2989 
2990     if (!getAssumed())
2991       return false;
2992 
2993     // If it is not in AssumedLiveBlocks then it for sure dead.
2994     // Otherwise, it can still be after noreturn call in a live block.
2995     if (!AssumedLiveBlocks.count(I->getParent()))
2996       return true;
2997 
2998     // If it is not after a liveness barrier it is live.
2999     const Instruction *PrevI = I->getPrevNode();
3000     while (PrevI) {
3001       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3002         return true;
3003       PrevI = PrevI->getPrevNode();
3004     }
3005     return false;
3006   }
3007 
3008   /// See AAIsDead::isKnownDead(Instruction *I).
3009   bool isKnownDead(const Instruction *I) const override {
3010     return getKnown() && isAssumedDead(I);
3011   }
3012 
3013   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3014   /// that internal function called from \p BB should now be looked at.
3015   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3016     if (!AssumedLiveBlocks.insert(&BB).second)
3017       return false;
3018 
3019     // We assume that all of BB is (probably) live now and if there are calls to
3020     // internal functions we will assume that those are now live as well. This
3021     // is a performance optimization for blocks with calls to a lot of internal
3022     // functions. It can however cause dead functions to be treated as live.
3023     for (const Instruction &I : BB)
3024       if (const auto *CB = dyn_cast<CallBase>(&I))
3025         if (const Function *F = CB->getCalledFunction())
3026           if (F->hasLocalLinkage())
3027             A.markLiveInternalFunction(*F);
3028     return true;
3029   }
3030 
3031   /// Collection of instructions that need to be explored again, e.g., we
3032   /// did assume they do not transfer control to (one of their) successors.
3033   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3034 
3035   /// Collection of instructions that are known to not transfer control.
3036   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3037 
3038   /// Collection of all assumed live BasicBlocks.
3039   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3040 };
3041 
3042 static bool
3043 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3044                         AbstractAttribute &AA,
3045                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3046   const IRPosition &IPos = IRPosition::callsite_function(CB);
3047 
3048   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3049   if (NoReturnAA.isAssumedNoReturn())
3050     return !NoReturnAA.isKnownNoReturn();
3051   if (CB.isTerminator())
3052     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3053   else
3054     AliveSuccessors.push_back(CB.getNextNode());
3055   return false;
3056 }
3057 
3058 static bool
3059 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3060                         AbstractAttribute &AA,
3061                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3062   bool UsedAssumedInformation =
3063       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3064 
3065   // First, determine if we can change an invoke to a call assuming the
3066   // callee is nounwind. This is not possible if the personality of the
3067   // function allows to catch asynchronous exceptions.
3068   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3069     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3070   } else {
3071     const IRPosition &IPos = IRPosition::callsite_function(II);
3072     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3073     if (AANoUnw.isAssumedNoUnwind()) {
3074       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3075     } else {
3076       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3077     }
3078   }
3079   return UsedAssumedInformation;
3080 }
3081 
3082 static bool
3083 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3084                         AbstractAttribute &AA,
3085                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3086   bool UsedAssumedInformation = false;
3087   if (BI.getNumSuccessors() == 1) {
3088     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3089   } else {
3090     Optional<ConstantInt *> CI = getAssumedConstantInt(
3091         A, *BI.getCondition(), AA, UsedAssumedInformation);
3092     if (!CI.hasValue()) {
3093       // No value yet, assume both edges are dead.
3094     } else if (CI.getValue()) {
3095       const BasicBlock *SuccBB =
3096           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3097       AliveSuccessors.push_back(&SuccBB->front());
3098     } else {
3099       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3100       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3101       UsedAssumedInformation = false;
3102     }
3103   }
3104   return UsedAssumedInformation;
3105 }
3106 
3107 static bool
3108 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3109                         AbstractAttribute &AA,
3110                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3111   bool UsedAssumedInformation = false;
3112   Optional<ConstantInt *> CI =
3113       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3114   if (!CI.hasValue()) {
3115     // No value yet, assume all edges are dead.
3116   } else if (CI.getValue()) {
3117     for (auto &CaseIt : SI.cases()) {
3118       if (CaseIt.getCaseValue() == CI.getValue()) {
3119         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3120         return UsedAssumedInformation;
3121       }
3122     }
3123     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3124     return UsedAssumedInformation;
3125   } else {
3126     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3127       AliveSuccessors.push_back(&SuccBB->front());
3128   }
3129   return UsedAssumedInformation;
3130 }
3131 
3132 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3133   ChangeStatus Change = ChangeStatus::UNCHANGED;
3134 
3135   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3136                     << getAnchorScope()->size() << "] BBs and "
3137                     << ToBeExploredFrom.size() << " exploration points and "
3138                     << KnownDeadEnds.size() << " known dead ends\n");
3139 
3140   // Copy and clear the list of instructions we need to explore from. It is
3141   // refilled with instructions the next update has to look at.
3142   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3143                                                ToBeExploredFrom.end());
3144   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3145 
3146   SmallVector<const Instruction *, 8> AliveSuccessors;
3147   while (!Worklist.empty()) {
3148     const Instruction *I = Worklist.pop_back_val();
3149     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3150 
3151     AliveSuccessors.clear();
3152 
3153     bool UsedAssumedInformation = false;
3154     switch (I->getOpcode()) {
3155     // TODO: look for (assumed) UB to backwards propagate "deadness".
3156     default:
3157       if (I->isTerminator()) {
3158         for (const BasicBlock *SuccBB : successors(I->getParent()))
3159           AliveSuccessors.push_back(&SuccBB->front());
3160       } else {
3161         AliveSuccessors.push_back(I->getNextNode());
3162       }
3163       break;
3164     case Instruction::Call:
3165       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3166                                                        *this, AliveSuccessors);
3167       break;
3168     case Instruction::Invoke:
3169       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3170                                                        *this, AliveSuccessors);
3171       break;
3172     case Instruction::Br:
3173       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3174                                                        *this, AliveSuccessors);
3175       break;
3176     case Instruction::Switch:
3177       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3178                                                        *this, AliveSuccessors);
3179       break;
3180     }
3181 
3182     if (UsedAssumedInformation) {
3183       NewToBeExploredFrom.insert(I);
3184     } else {
3185       Change = ChangeStatus::CHANGED;
3186       if (AliveSuccessors.empty() ||
3187           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3188         KnownDeadEnds.insert(I);
3189     }
3190 
3191     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3192                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3193                       << UsedAssumedInformation << "\n");
3194 
3195     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3196       if (!I->isTerminator()) {
3197         assert(AliveSuccessors.size() == 1 &&
3198                "Non-terminator expected to have a single successor!");
3199         Worklist.push_back(AliveSuccessor);
3200       } else {
3201         if (assumeLive(A, *AliveSuccessor->getParent()))
3202           Worklist.push_back(AliveSuccessor);
3203       }
3204     }
3205   }
3206 
3207   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3208 
3209   // If we know everything is live there is no need to query for liveness.
3210   // Instead, indicating a pessimistic fixpoint will cause the state to be
3211   // "invalid" and all queries to be answered conservatively without lookups.
3212   // To be in this state we have to (1) finished the exploration and (3) not
3213   // discovered any non-trivial dead end and (2) not ruled unreachable code
3214   // dead.
3215   if (ToBeExploredFrom.empty() &&
3216       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3217       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3218         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3219       }))
3220     return indicatePessimisticFixpoint();
3221   return Change;
3222 }
3223 
3224 /// Liveness information for a call sites.
3225 struct AAIsDeadCallSite final : AAIsDeadFunction {
3226   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3227       : AAIsDeadFunction(IRP, A) {}
3228 
3229   /// See AbstractAttribute::initialize(...).
3230   void initialize(Attributor &A) override {
3231     // TODO: Once we have call site specific value information we can provide
3232     //       call site specific liveness information and then it makes
3233     //       sense to specialize attributes for call sites instead of
3234     //       redirecting requests to the callee.
3235     llvm_unreachable("Abstract attributes for liveness are not "
3236                      "supported for call sites yet!");
3237   }
3238 
3239   /// See AbstractAttribute::updateImpl(...).
3240   ChangeStatus updateImpl(Attributor &A) override {
3241     return indicatePessimisticFixpoint();
3242   }
3243 
3244   /// See AbstractAttribute::trackStatistics()
3245   void trackStatistics() const override {}
3246 };
3247 
3248 /// -------------------- Dereferenceable Argument Attribute --------------------
3249 
3250 template <>
3251 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3252                                                      const DerefState &R) {
3253   ChangeStatus CS0 =
3254       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3255   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3256   return CS0 | CS1;
3257 }
3258 
3259 struct AADereferenceableImpl : AADereferenceable {
3260   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3261       : AADereferenceable(IRP, A) {}
3262   using StateType = DerefState;
3263 
3264   void initialize(Attributor &A) override {
3265     SmallVector<Attribute, 4> Attrs;
3266     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3267              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3268     for (const Attribute &Attr : Attrs)
3269       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3270 
3271     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3272                                        /* TrackDependence */ false);
3273 
3274     const IRPosition &IRP = this->getIRPosition();
3275     bool IsFnInterface = IRP.isFnInterfaceKind();
3276     Function *FnScope = IRP.getAnchorScope();
3277     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope)))
3278       indicatePessimisticFixpoint();
3279   }
3280 
3281   /// See AbstractAttribute::getState()
3282   /// {
3283   StateType &getState() override { return *this; }
3284   const StateType &getState() const override { return *this; }
3285   /// }
3286 
3287   /// Helper function for collecting accessed bytes in must-be-executed-context
3288   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3289                               DerefState &State) {
3290     const Value *UseV = U->get();
3291     if (!UseV->getType()->isPointerTy())
3292       return;
3293 
3294     Type *PtrTy = UseV->getType();
3295     const DataLayout &DL = A.getDataLayout();
3296     int64_t Offset;
3297     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3298             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3299       if (Base == &getAssociatedValue() &&
3300           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3301         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3302         State.addAccessedBytes(Offset, Size);
3303       }
3304     }
3305     return;
3306   }
3307 
3308   /// See AAFromMustBeExecutedContext
3309   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3310                  AADereferenceable::StateType &State) {
3311     bool IsNonNull = false;
3312     bool TrackUse = false;
3313     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3314         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3315 
3316     addAccessedBytesForUse(A, U, I, State);
3317     State.takeKnownDerefBytesMaximum(DerefBytes);
3318     return TrackUse;
3319   }
3320 
3321   /// See AbstractAttribute::manifest(...).
3322   ChangeStatus manifest(Attributor &A) override {
3323     ChangeStatus Change = AADereferenceable::manifest(A);
3324     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3325       removeAttrs({Attribute::DereferenceableOrNull});
3326       return ChangeStatus::CHANGED;
3327     }
3328     return Change;
3329   }
3330 
3331   void getDeducedAttributes(LLVMContext &Ctx,
3332                             SmallVectorImpl<Attribute> &Attrs) const override {
3333     // TODO: Add *_globally support
3334     if (isAssumedNonNull())
3335       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3336           Ctx, getAssumedDereferenceableBytes()));
3337     else
3338       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3339           Ctx, getAssumedDereferenceableBytes()));
3340   }
3341 
3342   /// See AbstractAttribute::getAsStr().
3343   const std::string getAsStr() const override {
3344     if (!getAssumedDereferenceableBytes())
3345       return "unknown-dereferenceable";
3346     return std::string("dereferenceable") +
3347            (isAssumedNonNull() ? "" : "_or_null") +
3348            (isAssumedGlobal() ? "_globally" : "") + "<" +
3349            std::to_string(getKnownDereferenceableBytes()) + "-" +
3350            std::to_string(getAssumedDereferenceableBytes()) + ">";
3351   }
3352 };
3353 
3354 /// Dereferenceable attribute for a floating value.
3355 struct AADereferenceableFloating
3356     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3357   using Base =
3358       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3359   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3360       : Base(IRP, A) {}
3361 
3362   /// See AbstractAttribute::updateImpl(...).
3363   ChangeStatus updateImpl(Attributor &A) override {
3364     ChangeStatus Change = Base::updateImpl(A);
3365 
3366     const DataLayout &DL = A.getDataLayout();
3367 
3368     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3369                             bool Stripped) -> bool {
3370       unsigned IdxWidth =
3371           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3372       APInt Offset(IdxWidth, 0);
3373       const Value *Base =
3374           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3375 
3376       const auto &AA =
3377           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3378       int64_t DerefBytes = 0;
3379       if (!Stripped && this == &AA) {
3380         // Use IR information if we did not strip anything.
3381         // TODO: track globally.
3382         bool CanBeNull;
3383         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3384         T.GlobalState.indicatePessimisticFixpoint();
3385       } else {
3386         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3387         DerefBytes = DS.DerefBytesState.getAssumed();
3388         T.GlobalState &= DS.GlobalState;
3389       }
3390 
3391       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3392 
3393       // For now we do not try to "increase" dereferenceability due to negative
3394       // indices as we first have to come up with code to deal with loops and
3395       // for overflows of the dereferenceable bytes.
3396       int64_t OffsetSExt = Offset.getSExtValue();
3397       if (OffsetSExt < 0)
3398         OffsetSExt = 0;
3399 
3400       T.takeAssumedDerefBytesMinimum(
3401           std::max(int64_t(0), DerefBytes - OffsetSExt));
3402 
3403       if (this == &AA) {
3404         if (!Stripped) {
3405           // If nothing was stripped IR information is all we got.
3406           T.takeKnownDerefBytesMaximum(
3407               std::max(int64_t(0), DerefBytes - OffsetSExt));
3408           T.indicatePessimisticFixpoint();
3409         } else if (OffsetSExt > 0) {
3410           // If something was stripped but there is circular reasoning we look
3411           // for the offset. If it is positive we basically decrease the
3412           // dereferenceable bytes in a circluar loop now, which will simply
3413           // drive them down to the known value in a very slow way which we
3414           // can accelerate.
3415           T.indicatePessimisticFixpoint();
3416         }
3417       }
3418 
3419       return T.isValidState();
3420     };
3421 
3422     DerefState T;
3423     if (!genericValueTraversal<AADereferenceable, DerefState>(
3424             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3425       return indicatePessimisticFixpoint();
3426 
3427     return Change | clampStateAndIndicateChange(getState(), T);
3428   }
3429 
3430   /// See AbstractAttribute::trackStatistics()
3431   void trackStatistics() const override {
3432     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3433   }
3434 };
3435 
3436 /// Dereferenceable attribute for a return value.
3437 struct AADereferenceableReturned final
3438     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3439   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3440       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3441             IRP, A) {}
3442 
3443   /// See AbstractAttribute::trackStatistics()
3444   void trackStatistics() const override {
3445     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3446   }
3447 };
3448 
3449 /// Dereferenceable attribute for an argument
3450 struct AADereferenceableArgument final
3451     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3452           AADereferenceable, AADereferenceableImpl> {
3453   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3454       AADereferenceable, AADereferenceableImpl>;
3455   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3456       : Base(IRP, A) {}
3457 
3458   /// See AbstractAttribute::trackStatistics()
3459   void trackStatistics() const override {
3460     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3461   }
3462 };
3463 
3464 /// Dereferenceable attribute for a call site argument.
3465 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3466   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3467       : AADereferenceableFloating(IRP, A) {}
3468 
3469   /// See AbstractAttribute::trackStatistics()
3470   void trackStatistics() const override {
3471     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3472   }
3473 };
3474 
3475 /// Dereferenceable attribute deduction for a call site return value.
3476 struct AADereferenceableCallSiteReturned final
3477     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3478           AADereferenceable, AADereferenceableImpl> {
3479   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3480       AADereferenceable, AADereferenceableImpl>;
3481   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3482       : Base(IRP, A) {}
3483 
3484   /// See AbstractAttribute::trackStatistics()
3485   void trackStatistics() const override {
3486     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3487   }
3488 };
3489 
3490 // ------------------------ Align Argument Attribute ------------------------
3491 
3492 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3493 /// the element type to be aligned.
3494 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3495                                                    const DataLayout &DL) {
3496   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3497   Type *ElementTy = Ptr->getType()->getPointerElementType();
3498   if (ElementTy->isSized())
3499     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3500   return KnownAlignment;
3501 }
3502 
3503 static unsigned getKnownAlignForUse(Attributor &A,
3504                                     AbstractAttribute &QueryingAA,
3505                                     Value &AssociatedValue, const Use *U,
3506                                     const Instruction *I, bool &TrackUse) {
3507   // We need to follow common pointer manipulation uses to the accesses they
3508   // feed into.
3509   if (isa<CastInst>(I)) {
3510     // Follow all but ptr2int casts.
3511     TrackUse = !isa<PtrToIntInst>(I);
3512     return 0;
3513   }
3514   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3515     if (GEP->hasAllConstantIndices()) {
3516       TrackUse = true;
3517       return 0;
3518     }
3519   }
3520 
3521   MaybeAlign MA;
3522   if (const auto *CB = dyn_cast<CallBase>(I)) {
3523     if (CB->isBundleOperand(U) || CB->isCallee(U))
3524       return 0;
3525 
3526     unsigned ArgNo = CB->getArgOperandNo(U);
3527     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3528     // As long as we only use known information there is no need to track
3529     // dependences here.
3530     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3531                                         /* TrackDependence */ false);
3532     MA = MaybeAlign(AlignAA.getKnownAlign());
3533   }
3534 
3535   const DataLayout &DL = A.getDataLayout();
3536   const Value *UseV = U->get();
3537   if (auto *SI = dyn_cast<StoreInst>(I)) {
3538     if (SI->getPointerOperand() == UseV) {
3539       if (unsigned SIAlign = SI->getAlignment())
3540         MA = MaybeAlign(SIAlign);
3541       else
3542         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3543     }
3544   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3545     if (LI->getPointerOperand() == UseV) {
3546       if (unsigned LIAlign = LI->getAlignment())
3547         MA = MaybeAlign(LIAlign);
3548       else
3549         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3550     }
3551   }
3552 
3553   if (!MA.hasValue() || MA <= 1)
3554     return 0;
3555 
3556   unsigned Alignment = MA->value();
3557   int64_t Offset;
3558 
3559   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3560     if (Base == &AssociatedValue) {
3561       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3562       // So we can say that the maximum power of two which is a divisor of
3563       // gcd(Offset, Alignment) is an alignment.
3564 
3565       uint32_t gcd =
3566           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3567       Alignment = llvm::PowerOf2Floor(gcd);
3568     }
3569   }
3570 
3571   return Alignment;
3572 }
3573 
3574 struct AAAlignImpl : AAAlign {
3575   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3576 
3577   /// See AbstractAttribute::initialize(...).
3578   void initialize(Attributor &A) override {
3579     SmallVector<Attribute, 4> Attrs;
3580     getAttrs({Attribute::Alignment}, Attrs);
3581     for (const Attribute &Attr : Attrs)
3582       takeKnownMaximum(Attr.getValueAsInt());
3583 
3584     if (getIRPosition().isFnInterfaceKind() &&
3585         (!getAnchorScope() ||
3586          !A.isFunctionIPOAmendable(*getAssociatedFunction())))
3587       indicatePessimisticFixpoint();
3588   }
3589 
3590   /// See AbstractAttribute::manifest(...).
3591   ChangeStatus manifest(Attributor &A) override {
3592     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3593 
3594     // Check for users that allow alignment annotations.
3595     Value &AssociatedValue = getAssociatedValue();
3596     for (const Use &U : AssociatedValue.uses()) {
3597       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3598         if (SI->getPointerOperand() == &AssociatedValue)
3599           if (SI->getAlignment() < getAssumedAlign()) {
3600             STATS_DECLTRACK(AAAlign, Store,
3601                             "Number of times alignment added to a store");
3602             SI->setAlignment(Align(getAssumedAlign()));
3603             LoadStoreChanged = ChangeStatus::CHANGED;
3604           }
3605       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3606         if (LI->getPointerOperand() == &AssociatedValue)
3607           if (LI->getAlignment() < getAssumedAlign()) {
3608             LI->setAlignment(Align(getAssumedAlign()));
3609             STATS_DECLTRACK(AAAlign, Load,
3610                             "Number of times alignment added to a load");
3611             LoadStoreChanged = ChangeStatus::CHANGED;
3612           }
3613       }
3614     }
3615 
3616     ChangeStatus Changed = AAAlign::manifest(A);
3617 
3618     MaybeAlign InheritAlign =
3619         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3620     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3621       return LoadStoreChanged;
3622     return Changed | LoadStoreChanged;
3623   }
3624 
3625   // TODO: Provide a helper to determine the implied ABI alignment and check in
3626   //       the existing manifest method and a new one for AAAlignImpl that value
3627   //       to avoid making the alignment explicit if it did not improve.
3628 
3629   /// See AbstractAttribute::getDeducedAttributes
3630   virtual void
3631   getDeducedAttributes(LLVMContext &Ctx,
3632                        SmallVectorImpl<Attribute> &Attrs) const override {
3633     if (getAssumedAlign() > 1)
3634       Attrs.emplace_back(
3635           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3636   }
3637   /// See AAFromMustBeExecutedContext
3638   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3639                  AAAlign::StateType &State) {
3640     bool TrackUse = false;
3641 
3642     unsigned int KnownAlign =
3643         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3644     State.takeKnownMaximum(KnownAlign);
3645 
3646     return TrackUse;
3647   }
3648 
3649   /// See AbstractAttribute::getAsStr().
3650   const std::string getAsStr() const override {
3651     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3652                                 "-" + std::to_string(getAssumedAlign()) + ">")
3653                              : "unknown-align";
3654   }
3655 };
3656 
3657 /// Align attribute for a floating value.
3658 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3659   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3660   AAAlignFloating(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3661 
3662   /// See AbstractAttribute::updateImpl(...).
3663   ChangeStatus updateImpl(Attributor &A) override {
3664     Base::updateImpl(A);
3665 
3666     const DataLayout &DL = A.getDataLayout();
3667 
3668     auto VisitValueCB = [&](Value &V, const Instruction *,
3669                             AAAlign::StateType &T, bool Stripped) -> bool {
3670       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3671       if (!Stripped && this == &AA) {
3672         // Use only IR information if we did not strip anything.
3673         const MaybeAlign PA = V.getPointerAlignment(DL);
3674         T.takeKnownMaximum(PA ? PA->value() : 0);
3675         T.indicatePessimisticFixpoint();
3676       } else {
3677         // Use abstract attribute information.
3678         const AAAlign::StateType &DS =
3679             static_cast<const AAAlign::StateType &>(AA.getState());
3680         T ^= DS;
3681       }
3682       return T.isValidState();
3683     };
3684 
3685     StateType T;
3686     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3687                                                    VisitValueCB, getCtxI()))
3688       return indicatePessimisticFixpoint();
3689 
3690     // TODO: If we know we visited all incoming values, thus no are assumed
3691     // dead, we can take the known information from the state T.
3692     return clampStateAndIndicateChange(getState(), T);
3693   }
3694 
3695   /// See AbstractAttribute::trackStatistics()
3696   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3697 };
3698 
3699 /// Align attribute for function return value.
3700 struct AAAlignReturned final
3701     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3702   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3703       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3704 
3705   /// See AbstractAttribute::trackStatistics()
3706   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3707 };
3708 
3709 /// Align attribute for function argument.
3710 struct AAAlignArgument final
3711     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3712                                                               AAAlignImpl> {
3713   using Base =
3714       AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3715                                                               AAAlignImpl>;
3716   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3717 
3718   /// See AbstractAttribute::manifest(...).
3719   ChangeStatus manifest(Attributor &A) override {
3720     // If the associated argument is involved in a must-tail call we give up
3721     // because we would need to keep the argument alignments of caller and
3722     // callee in-sync. Just does not seem worth the trouble right now.
3723     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3724       return ChangeStatus::UNCHANGED;
3725     return Base::manifest(A);
3726   }
3727 
3728   /// See AbstractAttribute::trackStatistics()
3729   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3730 };
3731 
3732 struct AAAlignCallSiteArgument final : AAAlignFloating {
3733   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3734       : AAAlignFloating(IRP, A) {}
3735 
3736   /// See AbstractAttribute::manifest(...).
3737   ChangeStatus manifest(Attributor &A) override {
3738     // If the associated argument is involved in a must-tail call we give up
3739     // because we would need to keep the argument alignments of caller and
3740     // callee in-sync. Just does not seem worth the trouble right now.
3741     if (Argument *Arg = getAssociatedArgument())
3742       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3743         return ChangeStatus::UNCHANGED;
3744     ChangeStatus Changed = AAAlignImpl::manifest(A);
3745     MaybeAlign InheritAlign =
3746         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3747     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3748       Changed = ChangeStatus::UNCHANGED;
3749     return Changed;
3750   }
3751 
3752   /// See AbstractAttribute::updateImpl(Attributor &A).
3753   ChangeStatus updateImpl(Attributor &A) override {
3754     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3755     if (Argument *Arg = getAssociatedArgument()) {
3756       // We only take known information from the argument
3757       // so we do not need to track a dependence.
3758       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3759           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3760       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3761     }
3762     return Changed;
3763   }
3764 
3765   /// See AbstractAttribute::trackStatistics()
3766   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3767 };
3768 
3769 /// Align attribute deduction for a call site return value.
3770 struct AAAlignCallSiteReturned final
3771     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3772                                                              AAAlignImpl> {
3773   using Base =
3774       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3775                                                              AAAlignImpl>;
3776   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3777       : Base(IRP, A) {}
3778 
3779   /// See AbstractAttribute::initialize(...).
3780   void initialize(Attributor &A) override {
3781     Base::initialize(A);
3782     Function *F = getAssociatedFunction();
3783     if (!F)
3784       indicatePessimisticFixpoint();
3785   }
3786 
3787   /// See AbstractAttribute::trackStatistics()
3788   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3789 };
3790 
3791 /// ------------------ Function No-Return Attribute ----------------------------
3792 struct AANoReturnImpl : public AANoReturn {
3793   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3794 
3795   /// See AbstractAttribute::initialize(...).
3796   void initialize(Attributor &A) override {
3797     AANoReturn::initialize(A);
3798     Function *F = getAssociatedFunction();
3799     if (!F)
3800       indicatePessimisticFixpoint();
3801   }
3802 
3803   /// See AbstractAttribute::getAsStr().
3804   const std::string getAsStr() const override {
3805     return getAssumed() ? "noreturn" : "may-return";
3806   }
3807 
3808   /// See AbstractAttribute::updateImpl(Attributor &A).
3809   virtual ChangeStatus updateImpl(Attributor &A) override {
3810     auto CheckForNoReturn = [](Instruction &) { return false; };
3811     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3812                                    {(unsigned)Instruction::Ret}))
3813       return indicatePessimisticFixpoint();
3814     return ChangeStatus::UNCHANGED;
3815   }
3816 };
3817 
3818 struct AANoReturnFunction final : AANoReturnImpl {
3819   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3820       : AANoReturnImpl(IRP, A) {}
3821 
3822   /// See AbstractAttribute::trackStatistics()
3823   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3824 };
3825 
3826 /// NoReturn attribute deduction for a call sites.
3827 struct AANoReturnCallSite final : AANoReturnImpl {
3828   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3829       : AANoReturnImpl(IRP, A) {}
3830 
3831   /// See AbstractAttribute::updateImpl(...).
3832   ChangeStatus updateImpl(Attributor &A) override {
3833     // TODO: Once we have call site specific value information we can provide
3834     //       call site specific liveness information and then it makes
3835     //       sense to specialize attributes for call sites arguments instead of
3836     //       redirecting requests to the callee argument.
3837     Function *F = getAssociatedFunction();
3838     const IRPosition &FnPos = IRPosition::function(*F);
3839     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3840     return clampStateAndIndicateChange(
3841         getState(),
3842         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3843   }
3844 
3845   /// See AbstractAttribute::trackStatistics()
3846   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3847 };
3848 
3849 /// ----------------------- Variable Capturing ---------------------------------
3850 
3851 /// A class to hold the state of for no-capture attributes.
3852 struct AANoCaptureImpl : public AANoCapture {
3853   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3854 
3855   /// See AbstractAttribute::initialize(...).
3856   void initialize(Attributor &A) override {
3857     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3858       indicateOptimisticFixpoint();
3859       return;
3860     }
3861     Function *AnchorScope = getAnchorScope();
3862     if (isFnInterfaceKind() &&
3863         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3864       indicatePessimisticFixpoint();
3865       return;
3866     }
3867 
3868     // You cannot "capture" null in the default address space.
3869     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3870         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3871       indicateOptimisticFixpoint();
3872       return;
3873     }
3874 
3875     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3876 
3877     // Check what state the associated function can actually capture.
3878     if (F)
3879       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3880     else
3881       indicatePessimisticFixpoint();
3882   }
3883 
3884   /// See AbstractAttribute::updateImpl(...).
3885   ChangeStatus updateImpl(Attributor &A) override;
3886 
3887   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3888   virtual void
3889   getDeducedAttributes(LLVMContext &Ctx,
3890                        SmallVectorImpl<Attribute> &Attrs) const override {
3891     if (!isAssumedNoCaptureMaybeReturned())
3892       return;
3893 
3894     if (getArgNo() >= 0) {
3895       if (isAssumedNoCapture())
3896         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3897       else if (ManifestInternal)
3898         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3899     }
3900   }
3901 
3902   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3903   /// depending on the ability of the function associated with \p IRP to capture
3904   /// state in memory and through "returning/throwing", respectively.
3905   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3906                                                    const Function &F,
3907                                                    BitIntegerState &State) {
3908     // TODO: Once we have memory behavior attributes we should use them here.
3909 
3910     // If we know we cannot communicate or write to memory, we do not care about
3911     // ptr2int anymore.
3912     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3913         F.getReturnType()->isVoidTy()) {
3914       State.addKnownBits(NO_CAPTURE);
3915       return;
3916     }
3917 
3918     // A function cannot capture state in memory if it only reads memory, it can
3919     // however return/throw state and the state might be influenced by the
3920     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3921     if (F.onlyReadsMemory())
3922       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3923 
3924     // A function cannot communicate state back if it does not through
3925     // exceptions and doesn not return values.
3926     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3927       State.addKnownBits(NOT_CAPTURED_IN_RET);
3928 
3929     // Check existing "returned" attributes.
3930     int ArgNo = IRP.getArgNo();
3931     if (F.doesNotThrow() && ArgNo >= 0) {
3932       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3933         if (F.hasParamAttribute(u, Attribute::Returned)) {
3934           if (u == unsigned(ArgNo))
3935             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3936           else if (F.onlyReadsMemory())
3937             State.addKnownBits(NO_CAPTURE);
3938           else
3939             State.addKnownBits(NOT_CAPTURED_IN_RET);
3940           break;
3941         }
3942     }
3943   }
3944 
3945   /// See AbstractState::getAsStr().
3946   const std::string getAsStr() const override {
3947     if (isKnownNoCapture())
3948       return "known not-captured";
3949     if (isAssumedNoCapture())
3950       return "assumed not-captured";
3951     if (isKnownNoCaptureMaybeReturned())
3952       return "known not-captured-maybe-returned";
3953     if (isAssumedNoCaptureMaybeReturned())
3954       return "assumed not-captured-maybe-returned";
3955     return "assumed-captured";
3956   }
3957 };
3958 
3959 /// Attributor-aware capture tracker.
3960 struct AACaptureUseTracker final : public CaptureTracker {
3961 
3962   /// Create a capture tracker that can lookup in-flight abstract attributes
3963   /// through the Attributor \p A.
3964   ///
3965   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3966   /// search is stopped. If a use leads to a return instruction,
3967   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3968   /// If a use leads to a ptr2int which may capture the value,
3969   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3970   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3971   /// set. All values in \p PotentialCopies are later tracked as well. For every
3972   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3973   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3974   /// conservatively set to true.
3975   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3976                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3977                       SmallVectorImpl<const Value *> &PotentialCopies,
3978                       unsigned &RemainingUsesToExplore)
3979       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3980         PotentialCopies(PotentialCopies),
3981         RemainingUsesToExplore(RemainingUsesToExplore) {}
3982 
3983   /// Determine if \p V maybe captured. *Also updates the state!*
3984   bool valueMayBeCaptured(const Value *V) {
3985     if (V->getType()->isPointerTy()) {
3986       PointerMayBeCaptured(V, this);
3987     } else {
3988       State.indicatePessimisticFixpoint();
3989     }
3990     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3991   }
3992 
3993   /// See CaptureTracker::tooManyUses().
3994   void tooManyUses() override {
3995     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3996   }
3997 
3998   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3999     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4000       return true;
4001     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4002         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4003         DepClassTy::OPTIONAL);
4004     return DerefAA.getAssumedDereferenceableBytes();
4005   }
4006 
4007   /// See CaptureTracker::captured(...).
4008   bool captured(const Use *U) override {
4009     Instruction *UInst = cast<Instruction>(U->getUser());
4010     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4011                       << "\n");
4012 
4013     // Because we may reuse the tracker multiple times we keep track of the
4014     // number of explored uses ourselves as well.
4015     if (RemainingUsesToExplore-- == 0) {
4016       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4017       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4018                           /* Return */ true);
4019     }
4020 
4021     // Deal with ptr2int by following uses.
4022     if (isa<PtrToIntInst>(UInst)) {
4023       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4024       return valueMayBeCaptured(UInst);
4025     }
4026 
4027     // Explicitly catch return instructions.
4028     if (isa<ReturnInst>(UInst))
4029       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4030                           /* Return */ true);
4031 
4032     // For now we only use special logic for call sites. However, the tracker
4033     // itself knows about a lot of other non-capturing cases already.
4034     auto *CB = dyn_cast<CallBase>(UInst);
4035     if (!CB || !CB->isArgOperand(U))
4036       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4037                           /* Return */ true);
4038 
4039     unsigned ArgNo = CB->getArgOperandNo(U);
4040     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4041     // If we have a abstract no-capture attribute for the argument we can use
4042     // it to justify a non-capture attribute here. This allows recursion!
4043     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4044     if (ArgNoCaptureAA.isAssumedNoCapture())
4045       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4046                           /* Return */ false);
4047     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4048       addPotentialCopy(*CB);
4049       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4050                           /* Return */ false);
4051     }
4052 
4053     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4054     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4055                         /* Return */ true);
4056   }
4057 
4058   /// Register \p CS as potential copy of the value we are checking.
4059   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4060 
4061   /// See CaptureTracker::shouldExplore(...).
4062   bool shouldExplore(const Use *U) override {
4063     // Check liveness and ignore droppable users.
4064     return !U->getUser()->isDroppable() &&
4065            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4066   }
4067 
4068   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4069   /// \p CapturedInRet, then return the appropriate value for use in the
4070   /// CaptureTracker::captured() interface.
4071   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4072                     bool CapturedInRet) {
4073     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4074                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4075     if (CapturedInMem)
4076       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4077     if (CapturedInInt)
4078       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4079     if (CapturedInRet)
4080       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4081     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4082   }
4083 
4084 private:
4085   /// The attributor providing in-flight abstract attributes.
4086   Attributor &A;
4087 
4088   /// The abstract attribute currently updated.
4089   AANoCapture &NoCaptureAA;
4090 
4091   /// The abstract liveness state.
4092   const AAIsDead &IsDeadAA;
4093 
4094   /// The state currently updated.
4095   AANoCapture::StateType &State;
4096 
4097   /// Set of potential copies of the tracked value.
4098   SmallVectorImpl<const Value *> &PotentialCopies;
4099 
4100   /// Global counter to limit the number of explored uses.
4101   unsigned &RemainingUsesToExplore;
4102 };
4103 
4104 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4105   const IRPosition &IRP = getIRPosition();
4106   const Value *V =
4107       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4108   if (!V)
4109     return indicatePessimisticFixpoint();
4110 
4111   const Function *F =
4112       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4113   assert(F && "Expected a function!");
4114   const IRPosition &FnPos = IRPosition::function(*F);
4115   const auto &IsDeadAA =
4116       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4117 
4118   AANoCapture::StateType T;
4119 
4120   // Readonly means we cannot capture through memory.
4121   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4122       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4123   if (FnMemAA.isAssumedReadOnly()) {
4124     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4125     if (FnMemAA.isKnownReadOnly())
4126       addKnownBits(NOT_CAPTURED_IN_MEM);
4127   }
4128 
4129   // Make sure all returned values are different than the underlying value.
4130   // TODO: we could do this in a more sophisticated way inside
4131   //       AAReturnedValues, e.g., track all values that escape through returns
4132   //       directly somehow.
4133   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4134     bool SeenConstant = false;
4135     for (auto &It : RVAA.returned_values()) {
4136       if (isa<Constant>(It.first)) {
4137         if (SeenConstant)
4138           return false;
4139         SeenConstant = true;
4140       } else if (!isa<Argument>(It.first) ||
4141                  It.first == getAssociatedArgument())
4142         return false;
4143     }
4144     return true;
4145   };
4146 
4147   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4148       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4149   if (NoUnwindAA.isAssumedNoUnwind()) {
4150     bool IsVoidTy = F->getReturnType()->isVoidTy();
4151     const AAReturnedValues *RVAA =
4152         IsVoidTy ? nullptr
4153                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4154                                                  /* TrackDependence */ true,
4155                                                  DepClassTy::OPTIONAL);
4156     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4157       T.addKnownBits(NOT_CAPTURED_IN_RET);
4158       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4159         return ChangeStatus::UNCHANGED;
4160       if (NoUnwindAA.isKnownNoUnwind() &&
4161           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4162         addKnownBits(NOT_CAPTURED_IN_RET);
4163         if (isKnown(NOT_CAPTURED_IN_MEM))
4164           return indicateOptimisticFixpoint();
4165       }
4166     }
4167   }
4168 
4169   // Use the CaptureTracker interface and logic with the specialized tracker,
4170   // defined in AACaptureUseTracker, that can look at in-flight abstract
4171   // attributes and directly updates the assumed state.
4172   SmallVector<const Value *, 4> PotentialCopies;
4173   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4174   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4175                               RemainingUsesToExplore);
4176 
4177   // Check all potential copies of the associated value until we can assume
4178   // none will be captured or we have to assume at least one might be.
4179   unsigned Idx = 0;
4180   PotentialCopies.push_back(V);
4181   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4182     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4183 
4184   AANoCapture::StateType &S = getState();
4185   auto Assumed = S.getAssumed();
4186   S.intersectAssumedBits(T.getAssumed());
4187   if (!isAssumedNoCaptureMaybeReturned())
4188     return indicatePessimisticFixpoint();
4189   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4190                                    : ChangeStatus::CHANGED;
4191 }
4192 
4193 /// NoCapture attribute for function arguments.
4194 struct AANoCaptureArgument final : AANoCaptureImpl {
4195   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4196       : AANoCaptureImpl(IRP, A) {}
4197 
4198   /// See AbstractAttribute::trackStatistics()
4199   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4200 };
4201 
4202 /// NoCapture attribute for call site arguments.
4203 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4204   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4205       : AANoCaptureImpl(IRP, A) {}
4206 
4207   /// See AbstractAttribute::initialize(...).
4208   void initialize(Attributor &A) override {
4209     if (Argument *Arg = getAssociatedArgument())
4210       if (Arg->hasByValAttr())
4211         indicateOptimisticFixpoint();
4212     AANoCaptureImpl::initialize(A);
4213   }
4214 
4215   /// See AbstractAttribute::updateImpl(...).
4216   ChangeStatus updateImpl(Attributor &A) override {
4217     // TODO: Once we have call site specific value information we can provide
4218     //       call site specific liveness information and then it makes
4219     //       sense to specialize attributes for call sites arguments instead of
4220     //       redirecting requests to the callee argument.
4221     Argument *Arg = getAssociatedArgument();
4222     if (!Arg)
4223       return indicatePessimisticFixpoint();
4224     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4225     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4226     return clampStateAndIndicateChange(
4227         getState(),
4228         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4229   }
4230 
4231   /// See AbstractAttribute::trackStatistics()
4232   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4233 };
4234 
4235 /// NoCapture attribute for floating values.
4236 struct AANoCaptureFloating final : AANoCaptureImpl {
4237   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4238       : AANoCaptureImpl(IRP, A) {}
4239 
4240   /// See AbstractAttribute::trackStatistics()
4241   void trackStatistics() const override {
4242     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4243   }
4244 };
4245 
4246 /// NoCapture attribute for function return value.
4247 struct AANoCaptureReturned final : AANoCaptureImpl {
4248   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4249       : AANoCaptureImpl(IRP, A) {
4250     llvm_unreachable("NoCapture is not applicable to function returns!");
4251   }
4252 
4253   /// See AbstractAttribute::initialize(...).
4254   void initialize(Attributor &A) override {
4255     llvm_unreachable("NoCapture is not applicable to function returns!");
4256   }
4257 
4258   /// See AbstractAttribute::updateImpl(...).
4259   ChangeStatus updateImpl(Attributor &A) override {
4260     llvm_unreachable("NoCapture is not applicable to function returns!");
4261   }
4262 
4263   /// See AbstractAttribute::trackStatistics()
4264   void trackStatistics() const override {}
4265 };
4266 
4267 /// NoCapture attribute deduction for a call site return value.
4268 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4269   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4270       : AANoCaptureImpl(IRP, A) {}
4271 
4272   /// See AbstractAttribute::trackStatistics()
4273   void trackStatistics() const override {
4274     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4275   }
4276 };
4277 
4278 /// ------------------ Value Simplify Attribute ----------------------------
4279 struct AAValueSimplifyImpl : AAValueSimplify {
4280   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4281       : AAValueSimplify(IRP, A) {}
4282 
4283   /// See AbstractAttribute::initialize(...).
4284   void initialize(Attributor &A) override {
4285     if (getAssociatedValue().getType()->isVoidTy())
4286       indicatePessimisticFixpoint();
4287   }
4288 
4289   /// See AbstractAttribute::getAsStr().
4290   const std::string getAsStr() const override {
4291     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4292                         : "not-simple";
4293   }
4294 
4295   /// See AbstractAttribute::trackStatistics()
4296   void trackStatistics() const override {}
4297 
4298   /// See AAValueSimplify::getAssumedSimplifiedValue()
4299   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4300     if (!getAssumed())
4301       return const_cast<Value *>(&getAssociatedValue());
4302     return SimplifiedAssociatedValue;
4303   }
4304 
4305   /// Helper function for querying AAValueSimplify and updating candicate.
4306   /// \param QueryingValue Value trying to unify with SimplifiedValue
4307   /// \param AccumulatedSimplifiedValue Current simplification result.
4308   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4309                              Value &QueryingValue,
4310                              Optional<Value *> &AccumulatedSimplifiedValue) {
4311     // FIXME: Add a typecast support.
4312 
4313     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4314         QueryingAA, IRPosition::value(QueryingValue));
4315 
4316     Optional<Value *> QueryingValueSimplified =
4317         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4318 
4319     if (!QueryingValueSimplified.hasValue())
4320       return true;
4321 
4322     if (!QueryingValueSimplified.getValue())
4323       return false;
4324 
4325     Value &QueryingValueSimplifiedUnwrapped =
4326         *QueryingValueSimplified.getValue();
4327 
4328     if (AccumulatedSimplifiedValue.hasValue() &&
4329         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4330         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4331       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4332     if (AccumulatedSimplifiedValue.hasValue() &&
4333         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4334       return true;
4335 
4336     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4337                       << " is assumed to be "
4338                       << QueryingValueSimplifiedUnwrapped << "\n");
4339 
4340     AccumulatedSimplifiedValue = QueryingValueSimplified;
4341     return true;
4342   }
4343 
4344   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4345     if (!getAssociatedValue().getType()->isIntegerTy())
4346       return false;
4347 
4348     const auto &ValueConstantRangeAA =
4349         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4350 
4351     Optional<ConstantInt *> COpt =
4352         ValueConstantRangeAA.getAssumedConstantInt(A);
4353     if (COpt.hasValue()) {
4354       if (auto *C = COpt.getValue())
4355         SimplifiedAssociatedValue = C;
4356       else
4357         return false;
4358     } else {
4359       SimplifiedAssociatedValue = llvm::None;
4360     }
4361     return true;
4362   }
4363 
4364   /// See AbstractAttribute::manifest(...).
4365   ChangeStatus manifest(Attributor &A) override {
4366     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4367 
4368     if (SimplifiedAssociatedValue.hasValue() &&
4369         !SimplifiedAssociatedValue.getValue())
4370       return Changed;
4371 
4372     Value &V = getAssociatedValue();
4373     auto *C = SimplifiedAssociatedValue.hasValue()
4374                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4375                   : UndefValue::get(V.getType());
4376     if (C) {
4377       // We can replace the AssociatedValue with the constant.
4378       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4379         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4380                           << " :: " << *this << "\n");
4381         if (A.changeValueAfterManifest(V, *C))
4382           Changed = ChangeStatus::CHANGED;
4383       }
4384     }
4385 
4386     return Changed | AAValueSimplify::manifest(A);
4387   }
4388 
4389   /// See AbstractState::indicatePessimisticFixpoint(...).
4390   ChangeStatus indicatePessimisticFixpoint() override {
4391     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4392     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4393     SimplifiedAssociatedValue = &getAssociatedValue();
4394     indicateOptimisticFixpoint();
4395     return ChangeStatus::CHANGED;
4396   }
4397 
4398 protected:
4399   // An assumed simplified value. Initially, it is set to Optional::None, which
4400   // means that the value is not clear under current assumption. If in the
4401   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4402   // returns orignal associated value.
4403   Optional<Value *> SimplifiedAssociatedValue;
4404 };
4405 
4406 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4407   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4408       : AAValueSimplifyImpl(IRP, A) {}
4409 
4410   void initialize(Attributor &A) override {
4411     AAValueSimplifyImpl::initialize(A);
4412     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4413       indicatePessimisticFixpoint();
4414     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4415                 /* IgnoreSubsumingPositions */ true))
4416       indicatePessimisticFixpoint();
4417 
4418     // FIXME: This is a hack to prevent us from propagating function poiner in
4419     // the new pass manager CGSCC pass as it creates call edges the
4420     // CallGraphUpdater cannot handle yet.
4421     Value &V = getAssociatedValue();
4422     if (V.getType()->isPointerTy() &&
4423         V.getType()->getPointerElementType()->isFunctionTy() &&
4424         !A.isModulePass())
4425       indicatePessimisticFixpoint();
4426   }
4427 
4428   /// See AbstractAttribute::updateImpl(...).
4429   ChangeStatus updateImpl(Attributor &A) override {
4430     // Byval is only replacable if it is readonly otherwise we would write into
4431     // the replaced value and not the copy that byval creates implicitly.
4432     Argument *Arg = getAssociatedArgument();
4433     if (Arg->hasByValAttr()) {
4434       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4435       //       there is no race by not copying a constant byval.
4436       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4437       if (!MemAA.isAssumedReadOnly())
4438         return indicatePessimisticFixpoint();
4439     }
4440 
4441     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4442 
4443     auto PredForCallSite = [&](AbstractCallSite ACS) {
4444       const IRPosition &ACSArgPos =
4445           IRPosition::callsite_argument(ACS, getArgNo());
4446       // Check if a coresponding argument was found or if it is on not
4447       // associated (which can happen for callback calls).
4448       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4449         return false;
4450 
4451       // We can only propagate thread independent values through callbacks.
4452       // This is different to direct/indirect call sites because for them we
4453       // know the thread executing the caller and callee is the same. For
4454       // callbacks this is not guaranteed, thus a thread dependent value could
4455       // be different for the caller and callee, making it invalid to propagate.
4456       Value &ArgOp = ACSArgPos.getAssociatedValue();
4457       if (ACS.isCallbackCall())
4458         if (auto *C = dyn_cast<Constant>(&ArgOp))
4459           if (C->isThreadDependent())
4460             return false;
4461       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4462     };
4463 
4464     bool AllCallSitesKnown;
4465     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4466                                 AllCallSitesKnown))
4467       if (!askSimplifiedValueForAAValueConstantRange(A))
4468         return indicatePessimisticFixpoint();
4469 
4470     // If a candicate was found in this update, return CHANGED.
4471     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4472                ? ChangeStatus::UNCHANGED
4473                : ChangeStatus ::CHANGED;
4474   }
4475 
4476   /// See AbstractAttribute::trackStatistics()
4477   void trackStatistics() const override {
4478     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4479   }
4480 };
4481 
4482 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4483   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4484       : AAValueSimplifyImpl(IRP, A) {}
4485 
4486   /// See AbstractAttribute::updateImpl(...).
4487   ChangeStatus updateImpl(Attributor &A) override {
4488     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4489 
4490     auto PredForReturned = [&](Value &V) {
4491       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4492     };
4493 
4494     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4495       if (!askSimplifiedValueForAAValueConstantRange(A))
4496         return indicatePessimisticFixpoint();
4497 
4498     // If a candicate was found in this update, return CHANGED.
4499     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4500                ? ChangeStatus::UNCHANGED
4501                : ChangeStatus ::CHANGED;
4502   }
4503 
4504   ChangeStatus manifest(Attributor &A) override {
4505     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4506 
4507     if (SimplifiedAssociatedValue.hasValue() &&
4508         !SimplifiedAssociatedValue.getValue())
4509       return Changed;
4510 
4511     Value &V = getAssociatedValue();
4512     auto *C = SimplifiedAssociatedValue.hasValue()
4513                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4514                   : UndefValue::get(V.getType());
4515     if (C) {
4516       auto PredForReturned =
4517           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4518             // We can replace the AssociatedValue with the constant.
4519             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4520               return true;
4521 
4522             for (ReturnInst *RI : RetInsts) {
4523               if (RI->getFunction() != getAnchorScope())
4524                 continue;
4525               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4526                                 << " in " << *RI << " :: " << *this << "\n");
4527               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4528                 Changed = ChangeStatus::CHANGED;
4529             }
4530             return true;
4531           };
4532       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4533     }
4534 
4535     return Changed | AAValueSimplify::manifest(A);
4536   }
4537 
4538   /// See AbstractAttribute::trackStatistics()
4539   void trackStatistics() const override {
4540     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4541   }
4542 };
4543 
4544 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4545   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4546       : AAValueSimplifyImpl(IRP, A) {}
4547 
4548   /// See AbstractAttribute::initialize(...).
4549   void initialize(Attributor &A) override {
4550     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4551     //        Needs investigation.
4552     // AAValueSimplifyImpl::initialize(A);
4553     Value &V = getAnchorValue();
4554 
4555     // TODO: add other stuffs
4556     if (isa<Constant>(V))
4557       indicatePessimisticFixpoint();
4558   }
4559 
4560   /// See AbstractAttribute::updateImpl(...).
4561   ChangeStatus updateImpl(Attributor &A) override {
4562     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4563 
4564     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4565                             bool Stripped) -> bool {
4566       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4567       if (!Stripped && this == &AA) {
4568         // TODO: Look the instruction and check recursively.
4569 
4570         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4571                           << "\n");
4572         return false;
4573       }
4574       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4575     };
4576 
4577     bool Dummy = false;
4578     if (!genericValueTraversal<AAValueSimplify, bool>(
4579             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4580       if (!askSimplifiedValueForAAValueConstantRange(A))
4581         return indicatePessimisticFixpoint();
4582 
4583     // If a candicate was found in this update, return CHANGED.
4584 
4585     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4586                ? ChangeStatus::UNCHANGED
4587                : ChangeStatus ::CHANGED;
4588   }
4589 
4590   /// See AbstractAttribute::trackStatistics()
4591   void trackStatistics() const override {
4592     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4593   }
4594 };
4595 
4596 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4597   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4598       : AAValueSimplifyImpl(IRP, A) {}
4599 
4600   /// See AbstractAttribute::initialize(...).
4601   void initialize(Attributor &A) override {
4602     SimplifiedAssociatedValue = &getAnchorValue();
4603     indicateOptimisticFixpoint();
4604   }
4605   /// See AbstractAttribute::initialize(...).
4606   ChangeStatus updateImpl(Attributor &A) override {
4607     llvm_unreachable(
4608         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4609   }
4610   /// See AbstractAttribute::trackStatistics()
4611   void trackStatistics() const override {
4612     STATS_DECLTRACK_FN_ATTR(value_simplify)
4613   }
4614 };
4615 
4616 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4617   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4618       : AAValueSimplifyFunction(IRP, A) {}
4619   /// See AbstractAttribute::trackStatistics()
4620   void trackStatistics() const override {
4621     STATS_DECLTRACK_CS_ATTR(value_simplify)
4622   }
4623 };
4624 
4625 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4626   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4627       : AAValueSimplifyReturned(IRP, A) {}
4628 
4629   /// See AbstractAttribute::manifest(...).
4630   ChangeStatus manifest(Attributor &A) override {
4631     return AAValueSimplifyImpl::manifest(A);
4632   }
4633 
4634   void trackStatistics() const override {
4635     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4636   }
4637 };
4638 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4639   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4640       : AAValueSimplifyFloating(IRP, A) {}
4641 
4642   void trackStatistics() const override {
4643     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4644   }
4645 };
4646 
4647 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4648 struct AAHeapToStackImpl : public AAHeapToStack {
4649   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4650       : AAHeapToStack(IRP, A) {}
4651 
4652   const std::string getAsStr() const override {
4653     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4654   }
4655 
4656   ChangeStatus manifest(Attributor &A) override {
4657     assert(getState().isValidState() &&
4658            "Attempted to manifest an invalid state!");
4659 
4660     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4661     Function *F = getAnchorScope();
4662     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4663 
4664     for (Instruction *MallocCall : MallocCalls) {
4665       // This malloc cannot be replaced.
4666       if (BadMallocCalls.count(MallocCall))
4667         continue;
4668 
4669       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4670         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4671         A.deleteAfterManifest(*FreeCall);
4672         HasChanged = ChangeStatus::CHANGED;
4673       }
4674 
4675       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4676                         << "\n");
4677 
4678       MaybeAlign Alignment;
4679       Constant *Size;
4680       if (isCallocLikeFn(MallocCall, TLI)) {
4681         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4682         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4683         APInt TotalSize = SizeT->getValue() * Num->getValue();
4684         Size =
4685             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4686       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4687         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4688         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4689                                    ->getValue()
4690                                    .getZExtValue());
4691       } else {
4692         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4693       }
4694 
4695       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4696       Instruction *AI =
4697           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4698                          "", MallocCall->getNextNode());
4699 
4700       if (AI->getType() != MallocCall->getType())
4701         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4702                              AI->getNextNode());
4703 
4704       A.changeValueAfterManifest(*MallocCall, *AI);
4705 
4706       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4707         auto *NBB = II->getNormalDest();
4708         BranchInst::Create(NBB, MallocCall->getParent());
4709         A.deleteAfterManifest(*MallocCall);
4710       } else {
4711         A.deleteAfterManifest(*MallocCall);
4712       }
4713 
4714       // Zero out the allocated memory if it was a calloc.
4715       if (isCallocLikeFn(MallocCall, TLI)) {
4716         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4717                                    AI->getNextNode());
4718         Value *Ops[] = {
4719             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4720             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4721 
4722         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4723         Module *M = F->getParent();
4724         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4725         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4726       }
4727       HasChanged = ChangeStatus::CHANGED;
4728     }
4729 
4730     return HasChanged;
4731   }
4732 
4733   /// Collection of all malloc calls in a function.
4734   SmallSetVector<Instruction *, 4> MallocCalls;
4735 
4736   /// Collection of malloc calls that cannot be converted.
4737   DenseSet<const Instruction *> BadMallocCalls;
4738 
4739   /// A map for each malloc call to the set of associated free calls.
4740   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4741 
4742   ChangeStatus updateImpl(Attributor &A) override;
4743 };
4744 
4745 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4746   const Function *F = getAnchorScope();
4747   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4748 
4749   MustBeExecutedContextExplorer &Explorer =
4750       A.getInfoCache().getMustBeExecutedContextExplorer();
4751 
4752   auto FreeCheck = [&](Instruction &I) {
4753     const auto &Frees = FreesForMalloc.lookup(&I);
4754     if (Frees.size() != 1)
4755       return false;
4756     Instruction *UniqueFree = *Frees.begin();
4757     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4758   };
4759 
4760   auto UsesCheck = [&](Instruction &I) {
4761     bool ValidUsesOnly = true;
4762     bool MustUse = true;
4763     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4764       Instruction *UserI = cast<Instruction>(U.getUser());
4765       if (isa<LoadInst>(UserI))
4766         return true;
4767       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4768         if (SI->getValueOperand() == U.get()) {
4769           LLVM_DEBUG(dbgs()
4770                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4771           ValidUsesOnly = false;
4772         } else {
4773           // A store into the malloc'ed memory is fine.
4774         }
4775         return true;
4776       }
4777       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4778         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4779           return true;
4780         // Record malloc.
4781         if (isFreeCall(UserI, TLI)) {
4782           if (MustUse) {
4783             FreesForMalloc[&I].insert(UserI);
4784           } else {
4785             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4786                               << *UserI << "\n");
4787             ValidUsesOnly = false;
4788           }
4789           return true;
4790         }
4791 
4792         unsigned ArgNo = CB->getArgOperandNo(&U);
4793 
4794         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4795             *this, IRPosition::callsite_argument(*CB, ArgNo));
4796 
4797         // If a callsite argument use is nofree, we are fine.
4798         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4799             *this, IRPosition::callsite_argument(*CB, ArgNo));
4800 
4801         if (!NoCaptureAA.isAssumedNoCapture() ||
4802             !ArgNoFreeAA.isAssumedNoFree()) {
4803           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4804           ValidUsesOnly = false;
4805         }
4806         return true;
4807       }
4808 
4809       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4810           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4811         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4812         Follow = true;
4813         return true;
4814       }
4815       // Unknown user for which we can not track uses further (in a way that
4816       // makes sense).
4817       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4818       ValidUsesOnly = false;
4819       return true;
4820     };
4821     A.checkForAllUses(Pred, *this, I);
4822     return ValidUsesOnly;
4823   };
4824 
4825   auto MallocCallocCheck = [&](Instruction &I) {
4826     if (BadMallocCalls.count(&I))
4827       return true;
4828 
4829     bool IsMalloc = isMallocLikeFn(&I, TLI);
4830     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4831     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4832     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4833       BadMallocCalls.insert(&I);
4834       return true;
4835     }
4836 
4837     if (IsMalloc) {
4838       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4839         if (Size->getValue().ule(MaxHeapToStackSize))
4840           if (UsesCheck(I) || FreeCheck(I)) {
4841             MallocCalls.insert(&I);
4842             return true;
4843           }
4844     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4845       // Only if the alignment and sizes are constant.
4846       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4847         if (Size->getValue().ule(MaxHeapToStackSize))
4848           if (UsesCheck(I) || FreeCheck(I)) {
4849             MallocCalls.insert(&I);
4850             return true;
4851           }
4852     } else if (IsCalloc) {
4853       bool Overflow = false;
4854       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4855         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4856           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4857                   .ule(MaxHeapToStackSize))
4858             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4859               MallocCalls.insert(&I);
4860               return true;
4861             }
4862     }
4863 
4864     BadMallocCalls.insert(&I);
4865     return true;
4866   };
4867 
4868   size_t NumBadMallocs = BadMallocCalls.size();
4869 
4870   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4871 
4872   if (NumBadMallocs != BadMallocCalls.size())
4873     return ChangeStatus::CHANGED;
4874 
4875   return ChangeStatus::UNCHANGED;
4876 }
4877 
4878 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4879   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4880       : AAHeapToStackImpl(IRP, A) {}
4881 
4882   /// See AbstractAttribute::trackStatistics().
4883   void trackStatistics() const override {
4884     STATS_DECL(
4885         MallocCalls, Function,
4886         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4887     for (auto *C : MallocCalls)
4888       if (!BadMallocCalls.count(C))
4889         ++BUILD_STAT_NAME(MallocCalls, Function);
4890   }
4891 };
4892 
4893 /// ----------------------- Privatizable Pointers ------------------------------
4894 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4895   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4896       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4897 
4898   ChangeStatus indicatePessimisticFixpoint() override {
4899     AAPrivatizablePtr::indicatePessimisticFixpoint();
4900     PrivatizableType = nullptr;
4901     return ChangeStatus::CHANGED;
4902   }
4903 
4904   /// Identify the type we can chose for a private copy of the underlying
4905   /// argument. None means it is not clear yet, nullptr means there is none.
4906   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4907 
4908   /// Return a privatizable type that encloses both T0 and T1.
4909   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4910   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4911     if (!T0.hasValue())
4912       return T1;
4913     if (!T1.hasValue())
4914       return T0;
4915     if (T0 == T1)
4916       return T0;
4917     return nullptr;
4918   }
4919 
4920   Optional<Type *> getPrivatizableType() const override {
4921     return PrivatizableType;
4922   }
4923 
4924   const std::string getAsStr() const override {
4925     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4926   }
4927 
4928 protected:
4929   Optional<Type *> PrivatizableType;
4930 };
4931 
4932 // TODO: Do this for call site arguments (probably also other values) as well.
4933 
4934 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4935   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4936       : AAPrivatizablePtrImpl(IRP, A) {}
4937 
4938   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4939   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4940     // If this is a byval argument and we know all the call sites (so we can
4941     // rewrite them), there is no need to check them explicitly.
4942     bool AllCallSitesKnown;
4943     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4944         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4945                                true, AllCallSitesKnown))
4946       return getAssociatedValue().getType()->getPointerElementType();
4947 
4948     Optional<Type *> Ty;
4949     unsigned ArgNo = getIRPosition().getArgNo();
4950 
4951     // Make sure the associated call site argument has the same type at all call
4952     // sites and it is an allocation we know is safe to privatize, for now that
4953     // means we only allow alloca instructions.
4954     // TODO: We can additionally analyze the accesses in the callee to  create
4955     //       the type from that information instead. That is a little more
4956     //       involved and will be done in a follow up patch.
4957     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4958       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4959       // Check if a coresponding argument was found or if it is one not
4960       // associated (which can happen for callback calls).
4961       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4962         return false;
4963 
4964       // Check that all call sites agree on a type.
4965       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4966       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4967 
4968       LLVM_DEBUG({
4969         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4970         if (CSTy.hasValue() && CSTy.getValue())
4971           CSTy.getValue()->print(dbgs());
4972         else if (CSTy.hasValue())
4973           dbgs() << "<nullptr>";
4974         else
4975           dbgs() << "<none>";
4976       });
4977 
4978       Ty = combineTypes(Ty, CSTy);
4979 
4980       LLVM_DEBUG({
4981         dbgs() << " : New Type: ";
4982         if (Ty.hasValue() && Ty.getValue())
4983           Ty.getValue()->print(dbgs());
4984         else if (Ty.hasValue())
4985           dbgs() << "<nullptr>";
4986         else
4987           dbgs() << "<none>";
4988         dbgs() << "\n";
4989       });
4990 
4991       return !Ty.hasValue() || Ty.getValue();
4992     };
4993 
4994     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4995       return nullptr;
4996     return Ty;
4997   }
4998 
4999   /// See AbstractAttribute::updateImpl(...).
5000   ChangeStatus updateImpl(Attributor &A) override {
5001     PrivatizableType = identifyPrivatizableType(A);
5002     if (!PrivatizableType.hasValue())
5003       return ChangeStatus::UNCHANGED;
5004     if (!PrivatizableType.getValue())
5005       return indicatePessimisticFixpoint();
5006 
5007     // Avoid arguments with padding for now.
5008     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5009         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5010                                                 A.getInfoCache().getDL())) {
5011       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5012       return indicatePessimisticFixpoint();
5013     }
5014 
5015     // Verify callee and caller agree on how the promoted argument would be
5016     // passed.
5017     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5018     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5019     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5020     Function &Fn = *getIRPosition().getAnchorScope();
5021     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5022     ArgsToPromote.insert(getAssociatedArgument());
5023     const auto *TTI =
5024         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5025     if (!TTI ||
5026         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5027             Fn, *TTI, ArgsToPromote, Dummy) ||
5028         ArgsToPromote.empty()) {
5029       LLVM_DEBUG(
5030           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5031                  << Fn.getName() << "\n");
5032       return indicatePessimisticFixpoint();
5033     }
5034 
5035     // Collect the types that will replace the privatizable type in the function
5036     // signature.
5037     SmallVector<Type *, 16> ReplacementTypes;
5038     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5039 
5040     // Register a rewrite of the argument.
5041     Argument *Arg = getAssociatedArgument();
5042     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5043       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5044       return indicatePessimisticFixpoint();
5045     }
5046 
5047     unsigned ArgNo = Arg->getArgNo();
5048 
5049     // Helper to check if for the given call site the associated argument is
5050     // passed to a callback where the privatization would be different.
5051     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5052       SmallVector<const Use *, 4> CallbackUses;
5053       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5054       for (const Use *U : CallbackUses) {
5055         AbstractCallSite CBACS(U);
5056         assert(CBACS && CBACS.isCallbackCall());
5057         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5058           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5059 
5060           LLVM_DEBUG({
5061             dbgs()
5062                 << "[AAPrivatizablePtr] Argument " << *Arg
5063                 << "check if can be privatized in the context of its parent ("
5064                 << Arg->getParent()->getName()
5065                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5066                    "callback ("
5067                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5068                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5069                 << CBACS.getCallArgOperand(CBArg) << " vs "
5070                 << CB.getArgOperand(ArgNo) << "\n"
5071                 << "[AAPrivatizablePtr] " << CBArg << " : "
5072                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5073           });
5074 
5075           if (CBArgNo != int(ArgNo))
5076             continue;
5077           const auto &CBArgPrivAA =
5078               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5079           if (CBArgPrivAA.isValidState()) {
5080             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5081             if (!CBArgPrivTy.hasValue())
5082               continue;
5083             if (CBArgPrivTy.getValue() == PrivatizableType)
5084               continue;
5085           }
5086 
5087           LLVM_DEBUG({
5088             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5089                    << " cannot be privatized in the context of its parent ("
5090                    << Arg->getParent()->getName()
5091                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5092                       "callback ("
5093                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5094                    << ").\n[AAPrivatizablePtr] for which the argument "
5095                       "privatization is not compatible.\n";
5096           });
5097           return false;
5098         }
5099       }
5100       return true;
5101     };
5102 
5103     // Helper to check if for the given call site the associated argument is
5104     // passed to a direct call where the privatization would be different.
5105     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5106       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5107       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5108       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5109              "Expected a direct call operand for callback call operand");
5110 
5111       LLVM_DEBUG({
5112         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5113                << " check if be privatized in the context of its parent ("
5114                << Arg->getParent()->getName()
5115                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5116                   "direct call of ("
5117                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5118                << ").\n";
5119       });
5120 
5121       Function *DCCallee = DC->getCalledFunction();
5122       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5123         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5124             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5125         if (DCArgPrivAA.isValidState()) {
5126           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5127           if (!DCArgPrivTy.hasValue())
5128             return true;
5129           if (DCArgPrivTy.getValue() == PrivatizableType)
5130             return true;
5131         }
5132       }
5133 
5134       LLVM_DEBUG({
5135         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5136                << " cannot be privatized in the context of its parent ("
5137                << Arg->getParent()->getName()
5138                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5139                   "direct call of ("
5140                << ACS.getInstruction()->getCalledFunction()->getName()
5141                << ").\n[AAPrivatizablePtr] for which the argument "
5142                   "privatization is not compatible.\n";
5143       });
5144       return false;
5145     };
5146 
5147     // Helper to check if the associated argument is used at the given abstract
5148     // call site in a way that is incompatible with the privatization assumed
5149     // here.
5150     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5151       if (ACS.isDirectCall())
5152         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5153       if (ACS.isCallbackCall())
5154         return IsCompatiblePrivArgOfDirectCS(ACS);
5155       return false;
5156     };
5157 
5158     bool AllCallSitesKnown;
5159     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5160                                 AllCallSitesKnown))
5161       return indicatePessimisticFixpoint();
5162 
5163     return ChangeStatus::UNCHANGED;
5164   }
5165 
5166   /// Given a type to private \p PrivType, collect the constituates (which are
5167   /// used) in \p ReplacementTypes.
5168   static void
5169   identifyReplacementTypes(Type *PrivType,
5170                            SmallVectorImpl<Type *> &ReplacementTypes) {
5171     // TODO: For now we expand the privatization type to the fullest which can
5172     //       lead to dead arguments that need to be removed later.
5173     assert(PrivType && "Expected privatizable type!");
5174 
5175     // Traverse the type, extract constituate types on the outermost level.
5176     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5177       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5178         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5179     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5180       ReplacementTypes.append(PrivArrayType->getNumElements(),
5181                               PrivArrayType->getElementType());
5182     } else {
5183       ReplacementTypes.push_back(PrivType);
5184     }
5185   }
5186 
5187   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5188   /// The values needed are taken from the arguments of \p F starting at
5189   /// position \p ArgNo.
5190   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5191                                    unsigned ArgNo, Instruction &IP) {
5192     assert(PrivType && "Expected privatizable type!");
5193 
5194     IRBuilder<NoFolder> IRB(&IP);
5195     const DataLayout &DL = F.getParent()->getDataLayout();
5196 
5197     // Traverse the type, build GEPs and stores.
5198     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5199       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5200       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5201         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5202         Value *Ptr = constructPointer(
5203             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5204         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5205       }
5206     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5207       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5208       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5209       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5210         Value *Ptr =
5211             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5212         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5213       }
5214     } else {
5215       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5216     }
5217   }
5218 
5219   /// Extract values from \p Base according to the type \p PrivType at the
5220   /// call position \p ACS. The values are appended to \p ReplacementValues.
5221   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5222                                Value *Base,
5223                                SmallVectorImpl<Value *> &ReplacementValues) {
5224     assert(Base && "Expected base value!");
5225     assert(PrivType && "Expected privatizable type!");
5226     Instruction *IP = ACS.getInstruction();
5227 
5228     IRBuilder<NoFolder> IRB(IP);
5229     const DataLayout &DL = IP->getModule()->getDataLayout();
5230 
5231     if (Base->getType()->getPointerElementType() != PrivType)
5232       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5233                                                  "", ACS.getInstruction());
5234 
5235     // TODO: Improve the alignment of the loads.
5236     // Traverse the type, build GEPs and loads.
5237     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5238       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5239       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5240         Type *PointeeTy = PrivStructType->getElementType(u);
5241         Value *Ptr =
5242             constructPointer(PointeeTy->getPointerTo(), Base,
5243                              PrivStructLayout->getElementOffset(u), IRB, DL);
5244         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5245         L->setAlignment(Align(1));
5246         ReplacementValues.push_back(L);
5247       }
5248     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5249       Type *PointeeTy = PrivArrayType->getElementType();
5250       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5251       Type *PointeePtrTy = PointeeTy->getPointerTo();
5252       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5253         Value *Ptr =
5254             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5255         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5256         L->setAlignment(Align(1));
5257         ReplacementValues.push_back(L);
5258       }
5259     } else {
5260       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5261       L->setAlignment(Align(1));
5262       ReplacementValues.push_back(L);
5263     }
5264   }
5265 
5266   /// See AbstractAttribute::manifest(...)
5267   ChangeStatus manifest(Attributor &A) override {
5268     if (!PrivatizableType.hasValue())
5269       return ChangeStatus::UNCHANGED;
5270     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5271 
5272     // Collect all tail calls in the function as we cannot allow new allocas to
5273     // escape into tail recursion.
5274     // TODO: Be smarter about new allocas escaping into tail calls.
5275     SmallVector<CallInst *, 16> TailCalls;
5276     if (!A.checkForAllInstructions(
5277             [&](Instruction &I) {
5278               CallInst &CI = cast<CallInst>(I);
5279               if (CI.isTailCall())
5280                 TailCalls.push_back(&CI);
5281               return true;
5282             },
5283             *this, {Instruction::Call}))
5284       return ChangeStatus::UNCHANGED;
5285 
5286     Argument *Arg = getAssociatedArgument();
5287 
5288     // Callback to repair the associated function. A new alloca is placed at the
5289     // beginning and initialized with the values passed through arguments. The
5290     // new alloca replaces the use of the old pointer argument.
5291     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5292         [=](const Attributor::ArgumentReplacementInfo &ARI,
5293             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5294           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5295           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5296           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5297                                     Arg->getName() + ".priv", IP);
5298           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5299                                ArgIt->getArgNo(), *IP);
5300           Arg->replaceAllUsesWith(AI);
5301 
5302           for (CallInst *CI : TailCalls)
5303             CI->setTailCall(false);
5304         };
5305 
5306     // Callback to repair a call site of the associated function. The elements
5307     // of the privatizable type are loaded prior to the call and passed to the
5308     // new function version.
5309     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5310         [=](const Attributor::ArgumentReplacementInfo &ARI,
5311             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5312           createReplacementValues(
5313               PrivatizableType.getValue(), ACS,
5314               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5315               NewArgOperands);
5316         };
5317 
5318     // Collect the types that will replace the privatizable type in the function
5319     // signature.
5320     SmallVector<Type *, 16> ReplacementTypes;
5321     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5322 
5323     // Register a rewrite of the argument.
5324     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5325                                            std::move(FnRepairCB),
5326                                            std::move(ACSRepairCB)))
5327       return ChangeStatus::CHANGED;
5328     return ChangeStatus::UNCHANGED;
5329   }
5330 
5331   /// See AbstractAttribute::trackStatistics()
5332   void trackStatistics() const override {
5333     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5334   }
5335 };
5336 
5337 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5338   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5339       : AAPrivatizablePtrImpl(IRP, A) {}
5340 
5341   /// See AbstractAttribute::initialize(...).
5342   virtual void initialize(Attributor &A) override {
5343     // TODO: We can privatize more than arguments.
5344     indicatePessimisticFixpoint();
5345   }
5346 
5347   ChangeStatus updateImpl(Attributor &A) override {
5348     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5349                      "updateImpl will not be called");
5350   }
5351 
5352   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5353   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5354     Value *Obj =
5355         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5356     if (!Obj) {
5357       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5358       return nullptr;
5359     }
5360 
5361     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5362       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5363         if (CI->isOne())
5364           return Obj->getType()->getPointerElementType();
5365     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5366       auto &PrivArgAA =
5367           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5368       if (PrivArgAA.isAssumedPrivatizablePtr())
5369         return Obj->getType()->getPointerElementType();
5370     }
5371 
5372     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5373                          "alloca nor privatizable argument: "
5374                       << *Obj << "!\n");
5375     return nullptr;
5376   }
5377 
5378   /// See AbstractAttribute::trackStatistics()
5379   void trackStatistics() const override {
5380     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5381   }
5382 };
5383 
5384 struct AAPrivatizablePtrCallSiteArgument final
5385     : public AAPrivatizablePtrFloating {
5386   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5387       : AAPrivatizablePtrFloating(IRP, A) {}
5388 
5389   /// See AbstractAttribute::initialize(...).
5390   void initialize(Attributor &A) override {
5391     if (getIRPosition().hasAttr(Attribute::ByVal))
5392       indicateOptimisticFixpoint();
5393   }
5394 
5395   /// See AbstractAttribute::updateImpl(...).
5396   ChangeStatus updateImpl(Attributor &A) override {
5397     PrivatizableType = identifyPrivatizableType(A);
5398     if (!PrivatizableType.hasValue())
5399       return ChangeStatus::UNCHANGED;
5400     if (!PrivatizableType.getValue())
5401       return indicatePessimisticFixpoint();
5402 
5403     const IRPosition &IRP = getIRPosition();
5404     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5405     if (!NoCaptureAA.isAssumedNoCapture()) {
5406       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5407       return indicatePessimisticFixpoint();
5408     }
5409 
5410     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5411     if (!NoAliasAA.isAssumedNoAlias()) {
5412       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5413       return indicatePessimisticFixpoint();
5414     }
5415 
5416     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5417     if (!MemBehaviorAA.isAssumedReadOnly()) {
5418       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5419       return indicatePessimisticFixpoint();
5420     }
5421 
5422     return ChangeStatus::UNCHANGED;
5423   }
5424 
5425   /// See AbstractAttribute::trackStatistics()
5426   void trackStatistics() const override {
5427     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5428   }
5429 };
5430 
5431 struct AAPrivatizablePtrCallSiteReturned final
5432     : public AAPrivatizablePtrFloating {
5433   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5434       : AAPrivatizablePtrFloating(IRP, A) {}
5435 
5436   /// See AbstractAttribute::initialize(...).
5437   void initialize(Attributor &A) override {
5438     // TODO: We can privatize more than arguments.
5439     indicatePessimisticFixpoint();
5440   }
5441 
5442   /// See AbstractAttribute::trackStatistics()
5443   void trackStatistics() const override {
5444     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5445   }
5446 };
5447 
5448 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5449   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5450       : AAPrivatizablePtrFloating(IRP, A) {}
5451 
5452   /// See AbstractAttribute::initialize(...).
5453   void initialize(Attributor &A) override {
5454     // TODO: We can privatize more than arguments.
5455     indicatePessimisticFixpoint();
5456   }
5457 
5458   /// See AbstractAttribute::trackStatistics()
5459   void trackStatistics() const override {
5460     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5461   }
5462 };
5463 
5464 /// -------------------- Memory Behavior Attributes ----------------------------
5465 /// Includes read-none, read-only, and write-only.
5466 /// ----------------------------------------------------------------------------
5467 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5468   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5469       : AAMemoryBehavior(IRP, A) {}
5470 
5471   /// See AbstractAttribute::initialize(...).
5472   void initialize(Attributor &A) override {
5473     intersectAssumedBits(BEST_STATE);
5474     getKnownStateFromValue(getIRPosition(), getState());
5475     IRAttribute::initialize(A);
5476   }
5477 
5478   /// Return the memory behavior information encoded in the IR for \p IRP.
5479   static void getKnownStateFromValue(const IRPosition &IRP,
5480                                      BitIntegerState &State,
5481                                      bool IgnoreSubsumingPositions = false) {
5482     SmallVector<Attribute, 2> Attrs;
5483     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5484     for (const Attribute &Attr : Attrs) {
5485       switch (Attr.getKindAsEnum()) {
5486       case Attribute::ReadNone:
5487         State.addKnownBits(NO_ACCESSES);
5488         break;
5489       case Attribute::ReadOnly:
5490         State.addKnownBits(NO_WRITES);
5491         break;
5492       case Attribute::WriteOnly:
5493         State.addKnownBits(NO_READS);
5494         break;
5495       default:
5496         llvm_unreachable("Unexpected attribute!");
5497       }
5498     }
5499 
5500     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5501       if (!I->mayReadFromMemory())
5502         State.addKnownBits(NO_READS);
5503       if (!I->mayWriteToMemory())
5504         State.addKnownBits(NO_WRITES);
5505     }
5506   }
5507 
5508   /// See AbstractAttribute::getDeducedAttributes(...).
5509   void getDeducedAttributes(LLVMContext &Ctx,
5510                             SmallVectorImpl<Attribute> &Attrs) const override {
5511     assert(Attrs.size() == 0);
5512     if (isAssumedReadNone())
5513       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5514     else if (isAssumedReadOnly())
5515       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5516     else if (isAssumedWriteOnly())
5517       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5518     assert(Attrs.size() <= 1);
5519   }
5520 
5521   /// See AbstractAttribute::manifest(...).
5522   ChangeStatus manifest(Attributor &A) override {
5523     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5524       return ChangeStatus::UNCHANGED;
5525 
5526     const IRPosition &IRP = getIRPosition();
5527 
5528     // Check if we would improve the existing attributes first.
5529     SmallVector<Attribute, 4> DeducedAttrs;
5530     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5531     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5532           return IRP.hasAttr(Attr.getKindAsEnum(),
5533                              /* IgnoreSubsumingPositions */ true);
5534         }))
5535       return ChangeStatus::UNCHANGED;
5536 
5537     // Clear existing attributes.
5538     IRP.removeAttrs(AttrKinds);
5539 
5540     // Use the generic manifest method.
5541     return IRAttribute::manifest(A);
5542   }
5543 
5544   /// See AbstractState::getAsStr().
5545   const std::string getAsStr() const override {
5546     if (isAssumedReadNone())
5547       return "readnone";
5548     if (isAssumedReadOnly())
5549       return "readonly";
5550     if (isAssumedWriteOnly())
5551       return "writeonly";
5552     return "may-read/write";
5553   }
5554 
5555   /// The set of IR attributes AAMemoryBehavior deals with.
5556   static const Attribute::AttrKind AttrKinds[3];
5557 };
5558 
5559 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5560     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5561 
5562 /// Memory behavior attribute for a floating value.
5563 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5564   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5565       : AAMemoryBehaviorImpl(IRP, A) {}
5566 
5567   /// See AbstractAttribute::initialize(...).
5568   void initialize(Attributor &A) override {
5569     AAMemoryBehaviorImpl::initialize(A);
5570     // Initialize the use vector with all direct uses of the associated value.
5571     for (const Use &U : getAssociatedValue().uses())
5572       Uses.insert(&U);
5573   }
5574 
5575   /// See AbstractAttribute::updateImpl(...).
5576   ChangeStatus updateImpl(Attributor &A) override;
5577 
5578   /// See AbstractAttribute::trackStatistics()
5579   void trackStatistics() const override {
5580     if (isAssumedReadNone())
5581       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5582     else if (isAssumedReadOnly())
5583       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5584     else if (isAssumedWriteOnly())
5585       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5586   }
5587 
5588 private:
5589   /// Return true if users of \p UserI might access the underlying
5590   /// variable/location described by \p U and should therefore be analyzed.
5591   bool followUsersOfUseIn(Attributor &A, const Use *U,
5592                           const Instruction *UserI);
5593 
5594   /// Update the state according to the effect of use \p U in \p UserI.
5595   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5596 
5597 protected:
5598   /// Container for (transitive) uses of the associated argument.
5599   SetVector<const Use *> Uses;
5600 };
5601 
5602 /// Memory behavior attribute for function argument.
5603 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5604   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5605       : AAMemoryBehaviorFloating(IRP, A) {}
5606 
5607   /// See AbstractAttribute::initialize(...).
5608   void initialize(Attributor &A) override {
5609     intersectAssumedBits(BEST_STATE);
5610     const IRPosition &IRP = getIRPosition();
5611     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5612     // can query it when we use has/getAttr. That would allow us to reuse the
5613     // initialize of the base class here.
5614     bool HasByVal =
5615         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5616     getKnownStateFromValue(IRP, getState(),
5617                            /* IgnoreSubsumingPositions */ HasByVal);
5618 
5619     // Initialize the use vector with all direct uses of the associated value.
5620     Argument *Arg = getAssociatedArgument();
5621     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5622       indicatePessimisticFixpoint();
5623     } else {
5624       // Initialize the use vector with all direct uses of the associated value.
5625       for (const Use &U : Arg->uses())
5626         Uses.insert(&U);
5627     }
5628   }
5629 
5630   ChangeStatus manifest(Attributor &A) override {
5631     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5632     if (!getAssociatedValue().getType()->isPointerTy())
5633       return ChangeStatus::UNCHANGED;
5634 
5635     // TODO: From readattrs.ll: "inalloca parameters are always
5636     //                           considered written"
5637     if (hasAttr({Attribute::InAlloca})) {
5638       removeKnownBits(NO_WRITES);
5639       removeAssumedBits(NO_WRITES);
5640     }
5641     return AAMemoryBehaviorFloating::manifest(A);
5642   }
5643 
5644   /// See AbstractAttribute::trackStatistics()
5645   void trackStatistics() const override {
5646     if (isAssumedReadNone())
5647       STATS_DECLTRACK_ARG_ATTR(readnone)
5648     else if (isAssumedReadOnly())
5649       STATS_DECLTRACK_ARG_ATTR(readonly)
5650     else if (isAssumedWriteOnly())
5651       STATS_DECLTRACK_ARG_ATTR(writeonly)
5652   }
5653 };
5654 
5655 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5656   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5657       : AAMemoryBehaviorArgument(IRP, A) {}
5658 
5659   /// See AbstractAttribute::initialize(...).
5660   void initialize(Attributor &A) override {
5661     if (Argument *Arg = getAssociatedArgument()) {
5662       if (Arg->hasByValAttr()) {
5663         addKnownBits(NO_WRITES);
5664         removeKnownBits(NO_READS);
5665         removeAssumedBits(NO_READS);
5666       }
5667     } else {
5668     }
5669     AAMemoryBehaviorArgument::initialize(A);
5670   }
5671 
5672   /// See AbstractAttribute::updateImpl(...).
5673   ChangeStatus updateImpl(Attributor &A) override {
5674     // TODO: Once we have call site specific value information we can provide
5675     //       call site specific liveness liveness information and then it makes
5676     //       sense to specialize attributes for call sites arguments instead of
5677     //       redirecting requests to the callee argument.
5678     Argument *Arg = getAssociatedArgument();
5679     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5680     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5681     return clampStateAndIndicateChange(
5682         getState(),
5683         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5684   }
5685 
5686   /// See AbstractAttribute::trackStatistics()
5687   void trackStatistics() const override {
5688     if (isAssumedReadNone())
5689       STATS_DECLTRACK_CSARG_ATTR(readnone)
5690     else if (isAssumedReadOnly())
5691       STATS_DECLTRACK_CSARG_ATTR(readonly)
5692     else if (isAssumedWriteOnly())
5693       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5694   }
5695 };
5696 
5697 /// Memory behavior attribute for a call site return position.
5698 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5699   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5700       : AAMemoryBehaviorFloating(IRP, A) {}
5701 
5702   /// See AbstractAttribute::manifest(...).
5703   ChangeStatus manifest(Attributor &A) override {
5704     // We do not annotate returned values.
5705     return ChangeStatus::UNCHANGED;
5706   }
5707 
5708   /// See AbstractAttribute::trackStatistics()
5709   void trackStatistics() const override {}
5710 };
5711 
5712 /// An AA to represent the memory behavior function attributes.
5713 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5714   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5715       : AAMemoryBehaviorImpl(IRP, A) {}
5716 
5717   /// See AbstractAttribute::updateImpl(Attributor &A).
5718   virtual ChangeStatus updateImpl(Attributor &A) override;
5719 
5720   /// See AbstractAttribute::manifest(...).
5721   ChangeStatus manifest(Attributor &A) override {
5722     Function &F = cast<Function>(getAnchorValue());
5723     if (isAssumedReadNone()) {
5724       F.removeFnAttr(Attribute::ArgMemOnly);
5725       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5726       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5727     }
5728     return AAMemoryBehaviorImpl::manifest(A);
5729   }
5730 
5731   /// See AbstractAttribute::trackStatistics()
5732   void trackStatistics() const override {
5733     if (isAssumedReadNone())
5734       STATS_DECLTRACK_FN_ATTR(readnone)
5735     else if (isAssumedReadOnly())
5736       STATS_DECLTRACK_FN_ATTR(readonly)
5737     else if (isAssumedWriteOnly())
5738       STATS_DECLTRACK_FN_ATTR(writeonly)
5739   }
5740 };
5741 
5742 /// AAMemoryBehavior attribute for call sites.
5743 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5744   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5745       : AAMemoryBehaviorImpl(IRP, A) {}
5746 
5747   /// See AbstractAttribute::initialize(...).
5748   void initialize(Attributor &A) override {
5749     AAMemoryBehaviorImpl::initialize(A);
5750     Function *F = getAssociatedFunction();
5751     if (!F || !A.isFunctionIPOAmendable(*F))
5752       indicatePessimisticFixpoint();
5753   }
5754 
5755   /// See AbstractAttribute::updateImpl(...).
5756   ChangeStatus updateImpl(Attributor &A) override {
5757     // TODO: Once we have call site specific value information we can provide
5758     //       call site specific liveness liveness information and then it makes
5759     //       sense to specialize attributes for call sites arguments instead of
5760     //       redirecting requests to the callee argument.
5761     Function *F = getAssociatedFunction();
5762     const IRPosition &FnPos = IRPosition::function(*F);
5763     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5764     return clampStateAndIndicateChange(
5765         getState(),
5766         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5767   }
5768 
5769   /// See AbstractAttribute::trackStatistics()
5770   void trackStatistics() const override {
5771     if (isAssumedReadNone())
5772       STATS_DECLTRACK_CS_ATTR(readnone)
5773     else if (isAssumedReadOnly())
5774       STATS_DECLTRACK_CS_ATTR(readonly)
5775     else if (isAssumedWriteOnly())
5776       STATS_DECLTRACK_CS_ATTR(writeonly)
5777   }
5778 };
5779 
5780 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5781 
5782   // The current assumed state used to determine a change.
5783   auto AssumedState = getAssumed();
5784 
5785   auto CheckRWInst = [&](Instruction &I) {
5786     // If the instruction has an own memory behavior state, use it to restrict
5787     // the local state. No further analysis is required as the other memory
5788     // state is as optimistic as it gets.
5789     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5790       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5791           *this, IRPosition::callsite_function(*CB));
5792       intersectAssumedBits(MemBehaviorAA.getAssumed());
5793       return !isAtFixpoint();
5794     }
5795 
5796     // Remove access kind modifiers if necessary.
5797     if (I.mayReadFromMemory())
5798       removeAssumedBits(NO_READS);
5799     if (I.mayWriteToMemory())
5800       removeAssumedBits(NO_WRITES);
5801     return !isAtFixpoint();
5802   };
5803 
5804   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5805     return indicatePessimisticFixpoint();
5806 
5807   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5808                                         : ChangeStatus::UNCHANGED;
5809 }
5810 
5811 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5812 
5813   const IRPosition &IRP = getIRPosition();
5814   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5815   AAMemoryBehavior::StateType &S = getState();
5816 
5817   // First, check the function scope. We take the known information and we avoid
5818   // work if the assumed information implies the current assumed information for
5819   // this attribute. This is a valid for all but byval arguments.
5820   Argument *Arg = IRP.getAssociatedArgument();
5821   AAMemoryBehavior::base_t FnMemAssumedState =
5822       AAMemoryBehavior::StateType::getWorstState();
5823   if (!Arg || !Arg->hasByValAttr()) {
5824     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5825         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5826     FnMemAssumedState = FnMemAA.getAssumed();
5827     S.addKnownBits(FnMemAA.getKnown());
5828     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5829       return ChangeStatus::UNCHANGED;
5830   }
5831 
5832   // Make sure the value is not captured (except through "return"), if
5833   // it is, any information derived would be irrelevant anyway as we cannot
5834   // check the potential aliases introduced by the capture. However, no need
5835   // to fall back to anythign less optimistic than the function state.
5836   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5837       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5838   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5839     S.intersectAssumedBits(FnMemAssumedState);
5840     return ChangeStatus::CHANGED;
5841   }
5842 
5843   // The current assumed state used to determine a change.
5844   auto AssumedState = S.getAssumed();
5845 
5846   // Liveness information to exclude dead users.
5847   // TODO: Take the FnPos once we have call site specific liveness information.
5848   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5849       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5850       /* TrackDependence */ false);
5851 
5852   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5853   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5854     const Use *U = Uses[i];
5855     Instruction *UserI = cast<Instruction>(U->getUser());
5856     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5857                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5858                       << "]\n");
5859     if (A.isAssumedDead(*U, this, &LivenessAA))
5860       continue;
5861 
5862     // Droppable users, e.g., llvm::assume does not actually perform any action.
5863     if (UserI->isDroppable())
5864       continue;
5865 
5866     // Check if the users of UserI should also be visited.
5867     if (followUsersOfUseIn(A, U, UserI))
5868       for (const Use &UserIUse : UserI->uses())
5869         Uses.insert(&UserIUse);
5870 
5871     // If UserI might touch memory we analyze the use in detail.
5872     if (UserI->mayReadOrWriteMemory())
5873       analyzeUseIn(A, U, UserI);
5874   }
5875 
5876   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5877                                         : ChangeStatus::UNCHANGED;
5878 }
5879 
5880 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5881                                                   const Instruction *UserI) {
5882   // The loaded value is unrelated to the pointer argument, no need to
5883   // follow the users of the load.
5884   if (isa<LoadInst>(UserI))
5885     return false;
5886 
5887   // By default we follow all uses assuming UserI might leak information on U,
5888   // we have special handling for call sites operands though.
5889   const auto *CB = dyn_cast<CallBase>(UserI);
5890   if (!CB || !CB->isArgOperand(U))
5891     return true;
5892 
5893   // If the use is a call argument known not to be captured, the users of
5894   // the call do not need to be visited because they have to be unrelated to
5895   // the input. Note that this check is not trivial even though we disallow
5896   // general capturing of the underlying argument. The reason is that the
5897   // call might the argument "through return", which we allow and for which we
5898   // need to check call users.
5899   if (U->get()->getType()->isPointerTy()) {
5900     unsigned ArgNo = CB->getArgOperandNo(U);
5901     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5902         *this, IRPosition::callsite_argument(*CB, ArgNo),
5903         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5904     return !ArgNoCaptureAA.isAssumedNoCapture();
5905   }
5906 
5907   return true;
5908 }
5909 
5910 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5911                                             const Instruction *UserI) {
5912   assert(UserI->mayReadOrWriteMemory());
5913 
5914   switch (UserI->getOpcode()) {
5915   default:
5916     // TODO: Handle all atomics and other side-effect operations we know of.
5917     break;
5918   case Instruction::Load:
5919     // Loads cause the NO_READS property to disappear.
5920     removeAssumedBits(NO_READS);
5921     return;
5922 
5923   case Instruction::Store:
5924     // Stores cause the NO_WRITES property to disappear if the use is the
5925     // pointer operand. Note that we do assume that capturing was taken care of
5926     // somewhere else.
5927     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5928       removeAssumedBits(NO_WRITES);
5929     return;
5930 
5931   case Instruction::Call:
5932   case Instruction::CallBr:
5933   case Instruction::Invoke: {
5934     // For call sites we look at the argument memory behavior attribute (this
5935     // could be recursive!) in order to restrict our own state.
5936     const auto *CB = cast<CallBase>(UserI);
5937 
5938     // Give up on operand bundles.
5939     if (CB->isBundleOperand(U)) {
5940       indicatePessimisticFixpoint();
5941       return;
5942     }
5943 
5944     // Calling a function does read the function pointer, maybe write it if the
5945     // function is self-modifying.
5946     if (CB->isCallee(U)) {
5947       removeAssumedBits(NO_READS);
5948       break;
5949     }
5950 
5951     // Adjust the possible access behavior based on the information on the
5952     // argument.
5953     IRPosition Pos;
5954     if (U->get()->getType()->isPointerTy())
5955       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5956     else
5957       Pos = IRPosition::callsite_function(*CB);
5958     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5959         *this, Pos,
5960         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5961     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5962     // and at least "known".
5963     intersectAssumedBits(MemBehaviorAA.getAssumed());
5964     return;
5965   }
5966   };
5967 
5968   // Generally, look at the "may-properties" and adjust the assumed state if we
5969   // did not trigger special handling before.
5970   if (UserI->mayReadFromMemory())
5971     removeAssumedBits(NO_READS);
5972   if (UserI->mayWriteToMemory())
5973     removeAssumedBits(NO_WRITES);
5974 }
5975 
5976 } // namespace
5977 
5978 /// -------------------- Memory Locations Attributes ---------------------------
5979 /// Includes read-none, argmemonly, inaccessiblememonly,
5980 /// inaccessiblememorargmemonly
5981 /// ----------------------------------------------------------------------------
5982 
5983 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5984     AAMemoryLocation::MemoryLocationsKind MLK) {
5985   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5986     return "all memory";
5987   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5988     return "no memory";
5989   std::string S = "memory:";
5990   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5991     S += "stack,";
5992   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5993     S += "constant,";
5994   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5995     S += "internal global,";
5996   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5997     S += "external global,";
5998   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5999     S += "argument,";
6000   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6001     S += "inaccessible,";
6002   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6003     S += "malloced,";
6004   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6005     S += "unknown,";
6006   S.pop_back();
6007   return S;
6008 }
6009 
6010 struct AAMemoryLocationImpl : public AAMemoryLocation {
6011 
6012   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6013       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {}
6014 
6015   ~AAMemoryLocationImpl() {
6016     // The AccessSets are allocated via a BumpPtrAllocator, we call
6017     // the destructor manually.
6018     for (auto &It : AccessKind2Accesses)
6019       It.getSecond()->~AccessSet();
6020   }
6021 
6022   /// See AbstractAttribute::initialize(...).
6023   void initialize(Attributor &A) override {
6024     intersectAssumedBits(BEST_STATE);
6025     getKnownStateFromValue(getIRPosition(), getState());
6026     IRAttribute::initialize(A);
6027   }
6028 
6029   /// Return the memory behavior information encoded in the IR for \p IRP.
6030   static void getKnownStateFromValue(const IRPosition &IRP,
6031                                      BitIntegerState &State,
6032                                      bool IgnoreSubsumingPositions = false) {
6033     SmallVector<Attribute, 2> Attrs;
6034     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6035     for (const Attribute &Attr : Attrs) {
6036       switch (Attr.getKindAsEnum()) {
6037       case Attribute::ReadNone:
6038         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6039         break;
6040       case Attribute::InaccessibleMemOnly:
6041         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6042         break;
6043       case Attribute::ArgMemOnly:
6044         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6045         break;
6046       case Attribute::InaccessibleMemOrArgMemOnly:
6047         State.addKnownBits(
6048             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6049         break;
6050       default:
6051         llvm_unreachable("Unexpected attribute!");
6052       }
6053     }
6054   }
6055 
6056   /// See AbstractAttribute::getDeducedAttributes(...).
6057   void getDeducedAttributes(LLVMContext &Ctx,
6058                             SmallVectorImpl<Attribute> &Attrs) const override {
6059     assert(Attrs.size() == 0);
6060     if (isAssumedReadNone()) {
6061       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6062     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6063       if (isAssumedInaccessibleMemOnly())
6064         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6065       else if (isAssumedArgMemOnly())
6066         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6067       else if (isAssumedInaccessibleOrArgMemOnly())
6068         Attrs.push_back(
6069             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6070     }
6071     assert(Attrs.size() <= 1);
6072   }
6073 
6074   /// See AbstractAttribute::manifest(...).
6075   ChangeStatus manifest(Attributor &A) override {
6076     const IRPosition &IRP = getIRPosition();
6077 
6078     // Check if we would improve the existing attributes first.
6079     SmallVector<Attribute, 4> DeducedAttrs;
6080     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6081     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6082           return IRP.hasAttr(Attr.getKindAsEnum(),
6083                              /* IgnoreSubsumingPositions */ true);
6084         }))
6085       return ChangeStatus::UNCHANGED;
6086 
6087     // Clear existing attributes.
6088     IRP.removeAttrs(AttrKinds);
6089     if (isAssumedReadNone())
6090       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6091 
6092     // Use the generic manifest method.
6093     return IRAttribute::manifest(A);
6094   }
6095 
6096   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6097   bool checkForAllAccessesToMemoryKind(
6098       function_ref<bool(const Instruction *, const Value *, AccessKind,
6099                         MemoryLocationsKind)>
6100           Pred,
6101       MemoryLocationsKind RequestedMLK) const override {
6102     if (!isValidState())
6103       return false;
6104 
6105     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6106     if (AssumedMLK == NO_LOCATIONS)
6107       return true;
6108 
6109     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6110       if (CurMLK & RequestedMLK)
6111         continue;
6112 
6113       if (const AccessSet *Accesses = AccessKind2Accesses.lookup(CurMLK))
6114         for (const AccessInfo &AI : *Accesses)
6115           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6116             return false;
6117     }
6118 
6119     return true;
6120   }
6121 
6122   ChangeStatus indicatePessimisticFixpoint() override {
6123     // If we give up and indicate a pessimistic fixpoint this instruction will
6124     // become an access for all potential access kinds:
6125     // TODO: Add pointers for argmemonly and globals to improve the results of
6126     //       checkForAllAccessesToMemoryKind.
6127     bool Changed = false;
6128     MemoryLocationsKind KnownMLK = getKnown();
6129     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6130     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6131       if (!(CurMLK & KnownMLK))
6132         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed);
6133     return AAMemoryLocation::indicatePessimisticFixpoint();
6134   }
6135 
6136 protected:
6137   /// Helper struct to tie together an instruction that has a read or write
6138   /// effect with the pointer it accesses (if any).
6139   struct AccessInfo {
6140 
6141     /// The instruction that caused the access.
6142     const Instruction *I;
6143 
6144     /// The base pointer that is accessed, or null if unknown.
6145     const Value *Ptr;
6146 
6147     /// The kind of access (read/write/read+write).
6148     AccessKind Kind;
6149 
6150     bool operator==(const AccessInfo &RHS) const {
6151       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6152     }
6153     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6154       if (LHS.I != RHS.I)
6155         return LHS.I < RHS.I;
6156       if (LHS.Ptr != RHS.Ptr)
6157         return LHS.Ptr < RHS.Ptr;
6158       if (LHS.Kind != RHS.Kind)
6159         return LHS.Kind < RHS.Kind;
6160       return false;
6161     }
6162   };
6163 
6164   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6165   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6166   using AccessSet = SmallSet<AccessInfo, 8, AccessInfo>;
6167   using AccessKind2AccessSetTy = DenseMap<unsigned, AccessSet *>;
6168   AccessKind2AccessSetTy AccessKind2Accesses;
6169 
6170   /// Return the kind(s) of location that may be accessed by \p V.
6171   AAMemoryLocation::MemoryLocationsKind
6172   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6173 
6174   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6175   /// an access to a \p MLK memory location with the access pointer \p Ptr.
6176   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6177                                  MemoryLocationsKind MLK, const Instruction *I,
6178                                  const Value *Ptr, bool &Changed) {
6179     // TODO: The kind should be determined at the call sites based on the
6180     // information we have there.
6181     AccessKind Kind = READ_WRITE;
6182     if (I) {
6183       Kind = I->mayReadFromMemory() ? READ : NONE;
6184       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6185     }
6186 
6187     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6188     auto *&Accesses = AccessKind2Accesses[MLK];
6189     if (!Accesses)
6190       Accesses = new (Allocator) AccessSet();
6191     Changed |= Accesses->insert(AccessInfo{I, Ptr, Kind}).second;
6192     State.removeAssumedBits(MLK);
6193   }
6194 
6195   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6196   /// arguments, and update the state and access map accordingly.
6197   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6198                           AAMemoryLocation::StateType &State, bool &Changed);
6199 
6200   /// Used to allocate access sets.
6201   BumpPtrAllocator &Allocator;
6202 
6203   /// The set of IR attributes AAMemoryLocation deals with.
6204   static const Attribute::AttrKind AttrKinds[4];
6205 };
6206 
6207 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6208     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6209     Attribute::InaccessibleMemOrArgMemOnly};
6210 
6211 void AAMemoryLocationImpl::categorizePtrValue(
6212     Attributor &A, const Instruction &I, const Value &Ptr,
6213     AAMemoryLocation::StateType &State, bool &Changed) {
6214   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6215                     << Ptr << " ["
6216                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6217 
6218   auto StripGEPCB = [](Value *V) -> Value * {
6219     auto *GEP = dyn_cast<GEPOperator>(V);
6220     while (GEP) {
6221       V = GEP->getPointerOperand();
6222       GEP = dyn_cast<GEPOperator>(V);
6223     }
6224     return V;
6225   };
6226 
6227   auto VisitValueCB = [&](Value &V, const Instruction *,
6228                           AAMemoryLocation::StateType &T,
6229                           bool Stripped) -> bool {
6230     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6231     if (isa<UndefValue>(V))
6232       return true;
6233     if (auto *Arg = dyn_cast<Argument>(&V)) {
6234       if (Arg->hasByValAttr())
6235         updateStateAndAccessesMap(T, NO_LOCAL_MEM, &I, &V, Changed);
6236       else
6237         updateStateAndAccessesMap(T, NO_ARGUMENT_MEM, &I, &V, Changed);
6238       return true;
6239     }
6240     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6241       if (GV->hasLocalLinkage())
6242         updateStateAndAccessesMap(T, NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6243       else
6244         updateStateAndAccessesMap(T, NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6245       return true;
6246     }
6247     if (isa<AllocaInst>(V)) {
6248       updateStateAndAccessesMap(T, NO_LOCAL_MEM, &I, &V, Changed);
6249       return true;
6250     }
6251     if (const auto *CB = dyn_cast<CallBase>(&V)) {
6252       const auto &NoAliasAA =
6253           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6254       if (NoAliasAA.isAssumedNoAlias()) {
6255         updateStateAndAccessesMap(T, NO_MALLOCED_MEM, &I, &V, Changed);
6256         return true;
6257       }
6258     }
6259 
6260     updateStateAndAccessesMap(T, NO_UNKOWN_MEM, &I, &V, Changed);
6261     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6262                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6263                       << "\n");
6264     return true;
6265   };
6266 
6267   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6268           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6269           /* MaxValues */ 32, StripGEPCB)) {
6270     LLVM_DEBUG(
6271         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6272     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed);
6273   } else {
6274     LLVM_DEBUG(
6275         dbgs()
6276         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6277         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6278   }
6279 }
6280 
6281 AAMemoryLocation::MemoryLocationsKind
6282 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6283                                                   bool &Changed) {
6284   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6285                     << I << "\n");
6286 
6287   AAMemoryLocation::StateType AccessedLocs;
6288   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6289 
6290   if (auto *CB = dyn_cast<CallBase>(&I)) {
6291 
6292     // First check if we assume any memory is access is visible.
6293     const auto &CBMemLocationAA =
6294         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6295     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6296                       << " [" << CBMemLocationAA << "]\n");
6297 
6298     if (CBMemLocationAA.isAssumedReadNone())
6299       return NO_LOCATIONS;
6300 
6301     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6302       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6303                                 Changed);
6304       return AccessedLocs.getAssumed();
6305     }
6306 
6307     uint32_t CBAssumedNotAccessedLocs =
6308         CBMemLocationAA.getAssumedNotAccessedLocation();
6309 
6310     // Set the argmemonly and global bit as we handle them separately below.
6311     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6312         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6313 
6314     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6315       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6316         continue;
6317       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed);
6318     }
6319 
6320     // Now handle global memory if it might be accessed. This is slightly tricky
6321     // as NO_GLOBAL_MEM has multiple bits set.
6322     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6323     if (HasGlobalAccesses) {
6324       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6325                             AccessKind Kind, MemoryLocationsKind MLK) {
6326         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed);
6327         return true;
6328       };
6329       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6330               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6331         return AccessedLocs.getWorstState();
6332     }
6333 
6334     LLVM_DEBUG(
6335         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6336                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6337 
6338     // Now handle argument memory if it might be accessed.
6339     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6340     if (HasArgAccesses) {
6341       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6342            ++ArgNo) {
6343 
6344         // Skip non-pointer arguments.
6345         const Value *ArgOp = CB->getArgOperand(ArgNo);
6346         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6347           continue;
6348 
6349         // Skip readnone arguments.
6350         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6351         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6352             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6353 
6354         if (ArgOpMemLocationAA.isAssumedReadNone())
6355           continue;
6356 
6357         // Categorize potentially accessed pointer arguments as if there was an
6358         // access instruction with them as pointer.
6359         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6360       }
6361     }
6362 
6363     LLVM_DEBUG(
6364         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6365                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6366 
6367     return AccessedLocs.getAssumed();
6368   }
6369 
6370   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6371     LLVM_DEBUG(
6372         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6373                << I << " [" << *Ptr << "]\n");
6374     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6375     return AccessedLocs.getAssumed();
6376   }
6377 
6378   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6379                     << I << "\n");
6380   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed);
6381   return AccessedLocs.getAssumed();
6382 }
6383 
6384 /// An AA to represent the memory behavior function attributes.
6385 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6386   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6387       : AAMemoryLocationImpl(IRP, A) {}
6388 
6389   /// See AbstractAttribute::updateImpl(Attributor &A).
6390   virtual ChangeStatus updateImpl(Attributor &A) override {
6391 
6392     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6393         *this, getIRPosition(), /* TrackDependence */ false);
6394     if (MemBehaviorAA.isAssumedReadNone()) {
6395       if (MemBehaviorAA.isKnownReadNone())
6396         return indicateOptimisticFixpoint();
6397       assert(isAssumedReadNone() &&
6398              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6399       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6400       return ChangeStatus::UNCHANGED;
6401     }
6402 
6403     // The current assumed state used to determine a change.
6404     auto AssumedState = getAssumed();
6405     bool Changed = false;
6406 
6407     auto CheckRWInst = [&](Instruction &I) {
6408       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6409       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6410                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6411       removeAssumedBits(inverseLocation(MLK, false, false));
6412       return true;
6413     };
6414 
6415     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6416       return indicatePessimisticFixpoint();
6417 
6418     Changed |= AssumedState != getAssumed();
6419     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6420   }
6421 
6422   /// See AbstractAttribute::trackStatistics()
6423   void trackStatistics() const override {
6424     if (isAssumedReadNone())
6425       STATS_DECLTRACK_FN_ATTR(readnone)
6426     else if (isAssumedArgMemOnly())
6427       STATS_DECLTRACK_FN_ATTR(argmemonly)
6428     else if (isAssumedInaccessibleMemOnly())
6429       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6430     else if (isAssumedInaccessibleOrArgMemOnly())
6431       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6432   }
6433 };
6434 
6435 /// AAMemoryLocation attribute for call sites.
6436 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6437   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6438       : AAMemoryLocationImpl(IRP, A) {}
6439 
6440   /// See AbstractAttribute::initialize(...).
6441   void initialize(Attributor &A) override {
6442     AAMemoryLocationImpl::initialize(A);
6443     Function *F = getAssociatedFunction();
6444     if (!F || !A.isFunctionIPOAmendable(*F))
6445       indicatePessimisticFixpoint();
6446   }
6447 
6448   /// See AbstractAttribute::updateImpl(...).
6449   ChangeStatus updateImpl(Attributor &A) override {
6450     // TODO: Once we have call site specific value information we can provide
6451     //       call site specific liveness liveness information and then it makes
6452     //       sense to specialize attributes for call sites arguments instead of
6453     //       redirecting requests to the callee argument.
6454     Function *F = getAssociatedFunction();
6455     const IRPosition &FnPos = IRPosition::function(*F);
6456     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6457     bool Changed = false;
6458     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6459                           AccessKind Kind, MemoryLocationsKind MLK) {
6460       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed);
6461       return true;
6462     };
6463     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6464       return indicatePessimisticFixpoint();
6465     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6466   }
6467 
6468   /// See AbstractAttribute::trackStatistics()
6469   void trackStatistics() const override {
6470     if (isAssumedReadNone())
6471       STATS_DECLTRACK_CS_ATTR(readnone)
6472   }
6473 };
6474 
6475 /// ------------------ Value Constant Range Attribute -------------------------
6476 
6477 struct AAValueConstantRangeImpl : AAValueConstantRange {
6478   using StateType = IntegerRangeState;
6479   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6480       : AAValueConstantRange(IRP, A) {}
6481 
6482   /// See AbstractAttribute::getAsStr().
6483   const std::string getAsStr() const override {
6484     std::string Str;
6485     llvm::raw_string_ostream OS(Str);
6486     OS << "range(" << getBitWidth() << ")<";
6487     getKnown().print(OS);
6488     OS << " / ";
6489     getAssumed().print(OS);
6490     OS << ">";
6491     return OS.str();
6492   }
6493 
6494   /// Helper function to get a SCEV expr for the associated value at program
6495   /// point \p I.
6496   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6497     if (!getAnchorScope())
6498       return nullptr;
6499 
6500     ScalarEvolution *SE =
6501         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6502             *getAnchorScope());
6503 
6504     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6505         *getAnchorScope());
6506 
6507     if (!SE || !LI)
6508       return nullptr;
6509 
6510     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6511     if (!I)
6512       return S;
6513 
6514     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6515   }
6516 
6517   /// Helper function to get a range from SCEV for the associated value at
6518   /// program point \p I.
6519   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6520                                          const Instruction *I = nullptr) const {
6521     if (!getAnchorScope())
6522       return getWorstState(getBitWidth());
6523 
6524     ScalarEvolution *SE =
6525         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6526             *getAnchorScope());
6527 
6528     const SCEV *S = getSCEV(A, I);
6529     if (!SE || !S)
6530       return getWorstState(getBitWidth());
6531 
6532     return SE->getUnsignedRange(S);
6533   }
6534 
6535   /// Helper function to get a range from LVI for the associated value at
6536   /// program point \p I.
6537   ConstantRange
6538   getConstantRangeFromLVI(Attributor &A,
6539                           const Instruction *CtxI = nullptr) const {
6540     if (!getAnchorScope())
6541       return getWorstState(getBitWidth());
6542 
6543     LazyValueInfo *LVI =
6544         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6545             *getAnchorScope());
6546 
6547     if (!LVI || !CtxI)
6548       return getWorstState(getBitWidth());
6549     return LVI->getConstantRange(&getAssociatedValue(),
6550                                  const_cast<BasicBlock *>(CtxI->getParent()),
6551                                  const_cast<Instruction *>(CtxI));
6552   }
6553 
6554   /// See AAValueConstantRange::getKnownConstantRange(..).
6555   ConstantRange
6556   getKnownConstantRange(Attributor &A,
6557                         const Instruction *CtxI = nullptr) const override {
6558     if (!CtxI || CtxI == getCtxI())
6559       return getKnown();
6560 
6561     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6562     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6563     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6564   }
6565 
6566   /// See AAValueConstantRange::getAssumedConstantRange(..).
6567   ConstantRange
6568   getAssumedConstantRange(Attributor &A,
6569                           const Instruction *CtxI = nullptr) const override {
6570     // TODO: Make SCEV use Attributor assumption.
6571     //       We may be able to bound a variable range via assumptions in
6572     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6573     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6574 
6575     if (!CtxI || CtxI == getCtxI())
6576       return getAssumed();
6577 
6578     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6579     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6580     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6581   }
6582 
6583   /// See AbstractAttribute::initialize(..).
6584   void initialize(Attributor &A) override {
6585     // Intersect a range given by SCEV.
6586     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6587 
6588     // Intersect a range given by LVI.
6589     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6590   }
6591 
6592   /// Helper function to create MDNode for range metadata.
6593   static MDNode *
6594   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6595                             const ConstantRange &AssumedConstantRange) {
6596     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6597                                   Ty, AssumedConstantRange.getLower())),
6598                               ConstantAsMetadata::get(ConstantInt::get(
6599                                   Ty, AssumedConstantRange.getUpper()))};
6600     return MDNode::get(Ctx, LowAndHigh);
6601   }
6602 
6603   /// Return true if \p Assumed is included in \p KnownRanges.
6604   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6605 
6606     if (Assumed.isFullSet())
6607       return false;
6608 
6609     if (!KnownRanges)
6610       return true;
6611 
6612     // If multiple ranges are annotated in IR, we give up to annotate assumed
6613     // range for now.
6614 
6615     // TODO:  If there exists a known range which containts assumed range, we
6616     // can say assumed range is better.
6617     if (KnownRanges->getNumOperands() > 2)
6618       return false;
6619 
6620     ConstantInt *Lower =
6621         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6622     ConstantInt *Upper =
6623         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6624 
6625     ConstantRange Known(Lower->getValue(), Upper->getValue());
6626     return Known.contains(Assumed) && Known != Assumed;
6627   }
6628 
6629   /// Helper function to set range metadata.
6630   static bool
6631   setRangeMetadataIfisBetterRange(Instruction *I,
6632                                   const ConstantRange &AssumedConstantRange) {
6633     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6634     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6635       if (!AssumedConstantRange.isEmptySet()) {
6636         I->setMetadata(LLVMContext::MD_range,
6637                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6638                                                  AssumedConstantRange));
6639         return true;
6640       }
6641     }
6642     return false;
6643   }
6644 
6645   /// See AbstractAttribute::manifest()
6646   ChangeStatus manifest(Attributor &A) override {
6647     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6648     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6649     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6650 
6651     auto &V = getAssociatedValue();
6652     if (!AssumedConstantRange.isEmptySet() &&
6653         !AssumedConstantRange.isSingleElement()) {
6654       if (Instruction *I = dyn_cast<Instruction>(&V))
6655         if (isa<CallInst>(I) || isa<LoadInst>(I))
6656           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6657             Changed = ChangeStatus::CHANGED;
6658     }
6659 
6660     return Changed;
6661   }
6662 };
6663 
6664 struct AAValueConstantRangeArgument final
6665     : AAArgumentFromCallSiteArguments<
6666           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6667   using Base = AAArgumentFromCallSiteArguments<
6668       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6669   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6670       : Base(IRP, A) {}
6671 
6672   /// See AbstractAttribute::initialize(..).
6673   void initialize(Attributor &A) override {
6674     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6675       indicatePessimisticFixpoint();
6676     } else {
6677       Base::initialize(A);
6678     }
6679   }
6680 
6681   /// See AbstractAttribute::trackStatistics()
6682   void trackStatistics() const override {
6683     STATS_DECLTRACK_ARG_ATTR(value_range)
6684   }
6685 };
6686 
6687 struct AAValueConstantRangeReturned
6688     : AAReturnedFromReturnedValues<AAValueConstantRange,
6689                                    AAValueConstantRangeImpl> {
6690   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6691                                             AAValueConstantRangeImpl>;
6692   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6693       : Base(IRP, A) {}
6694 
6695   /// See AbstractAttribute::initialize(...).
6696   void initialize(Attributor &A) override {}
6697 
6698   /// See AbstractAttribute::trackStatistics()
6699   void trackStatistics() const override {
6700     STATS_DECLTRACK_FNRET_ATTR(value_range)
6701   }
6702 };
6703 
6704 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6705   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6706       : AAValueConstantRangeImpl(IRP, A) {}
6707 
6708   /// See AbstractAttribute::initialize(...).
6709   void initialize(Attributor &A) override {
6710     AAValueConstantRangeImpl::initialize(A);
6711     Value &V = getAssociatedValue();
6712 
6713     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6714       unionAssumed(ConstantRange(C->getValue()));
6715       indicateOptimisticFixpoint();
6716       return;
6717     }
6718 
6719     if (isa<UndefValue>(&V)) {
6720       // Collapse the undef state to 0.
6721       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6722       indicateOptimisticFixpoint();
6723       return;
6724     }
6725 
6726     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6727       return;
6728     // If it is a load instruction with range metadata, use it.
6729     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6730       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6731         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6732         return;
6733       }
6734 
6735     // We can work with PHI and select instruction as we traverse their operands
6736     // during update.
6737     if (isa<SelectInst>(V) || isa<PHINode>(V))
6738       return;
6739 
6740     // Otherwise we give up.
6741     indicatePessimisticFixpoint();
6742 
6743     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6744                       << getAssociatedValue() << "\n");
6745   }
6746 
6747   bool calculateBinaryOperator(
6748       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6749       const Instruction *CtxI,
6750       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6751     Value *LHS = BinOp->getOperand(0);
6752     Value *RHS = BinOp->getOperand(1);
6753     // TODO: Allow non integers as well.
6754     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6755       return false;
6756 
6757     auto &LHSAA =
6758         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6759     QuerriedAAs.push_back(&LHSAA);
6760     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6761 
6762     auto &RHSAA =
6763         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6764     QuerriedAAs.push_back(&RHSAA);
6765     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6766 
6767     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6768 
6769     T.unionAssumed(AssumedRange);
6770 
6771     // TODO: Track a known state too.
6772 
6773     return T.isValidState();
6774   }
6775 
6776   bool calculateCastInst(
6777       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6778       const Instruction *CtxI,
6779       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6780     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6781     // TODO: Allow non integers as well.
6782     Value &OpV = *CastI->getOperand(0);
6783     if (!OpV.getType()->isIntegerTy())
6784       return false;
6785 
6786     auto &OpAA =
6787         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6788     QuerriedAAs.push_back(&OpAA);
6789     T.unionAssumed(
6790         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6791     return T.isValidState();
6792   }
6793 
6794   bool
6795   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6796                    const Instruction *CtxI,
6797                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6798     Value *LHS = CmpI->getOperand(0);
6799     Value *RHS = CmpI->getOperand(1);
6800     // TODO: Allow non integers as well.
6801     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6802       return false;
6803 
6804     auto &LHSAA =
6805         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6806     QuerriedAAs.push_back(&LHSAA);
6807     auto &RHSAA =
6808         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6809     QuerriedAAs.push_back(&RHSAA);
6810 
6811     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6812     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6813 
6814     // If one of them is empty set, we can't decide.
6815     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6816       return true;
6817 
6818     bool MustTrue = false, MustFalse = false;
6819 
6820     auto AllowedRegion =
6821         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6822 
6823     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6824         CmpI->getPredicate(), RHSAARange);
6825 
6826     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6827       MustFalse = true;
6828 
6829     if (SatisfyingRegion.contains(LHSAARange))
6830       MustTrue = true;
6831 
6832     assert((!MustTrue || !MustFalse) &&
6833            "Either MustTrue or MustFalse should be false!");
6834 
6835     if (MustTrue)
6836       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6837     else if (MustFalse)
6838       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6839     else
6840       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6841 
6842     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6843                       << " " << RHSAA << "\n");
6844 
6845     // TODO: Track a known state too.
6846     return T.isValidState();
6847   }
6848 
6849   /// See AbstractAttribute::updateImpl(...).
6850   ChangeStatus updateImpl(Attributor &A) override {
6851     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6852                             IntegerRangeState &T, bool Stripped) -> bool {
6853       Instruction *I = dyn_cast<Instruction>(&V);
6854       if (!I || isa<CallBase>(I)) {
6855 
6856         // If the value is not instruction, we query AA to Attributor.
6857         const auto &AA =
6858             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6859 
6860         // Clamp operator is not used to utilize a program point CtxI.
6861         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6862 
6863         return T.isValidState();
6864       }
6865 
6866       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6867       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6868         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6869           return false;
6870       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6871         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6872           return false;
6873       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6874         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6875           return false;
6876       } else {
6877         // Give up with other instructions.
6878         // TODO: Add other instructions
6879 
6880         T.indicatePessimisticFixpoint();
6881         return false;
6882       }
6883 
6884       // Catch circular reasoning in a pessimistic way for now.
6885       // TODO: Check how the range evolves and if we stripped anything, see also
6886       //       AADereferenceable or AAAlign for similar situations.
6887       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6888         if (QueriedAA != this)
6889           continue;
6890         // If we are in a stady state we do not need to worry.
6891         if (T.getAssumed() == getState().getAssumed())
6892           continue;
6893         T.indicatePessimisticFixpoint();
6894       }
6895 
6896       return T.isValidState();
6897     };
6898 
6899     IntegerRangeState T(getBitWidth());
6900 
6901     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6902             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
6903       return indicatePessimisticFixpoint();
6904 
6905     return clampStateAndIndicateChange(getState(), T);
6906   }
6907 
6908   /// See AbstractAttribute::trackStatistics()
6909   void trackStatistics() const override {
6910     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6911   }
6912 };
6913 
6914 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6915   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6916       : AAValueConstantRangeImpl(IRP, A) {}
6917 
6918   /// See AbstractAttribute::initialize(...).
6919   ChangeStatus updateImpl(Attributor &A) override {
6920     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6921                      "not be called");
6922   }
6923 
6924   /// See AbstractAttribute::trackStatistics()
6925   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6926 };
6927 
6928 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6929   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6930       : AAValueConstantRangeFunction(IRP, A) {}
6931 
6932   /// See AbstractAttribute::trackStatistics()
6933   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6934 };
6935 
6936 struct AAValueConstantRangeCallSiteReturned
6937     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6938                                      AAValueConstantRangeImpl> {
6939   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6940       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6941                                        AAValueConstantRangeImpl>(IRP, A) {}
6942 
6943   /// See AbstractAttribute::initialize(...).
6944   void initialize(Attributor &A) override {
6945     // If it is a load instruction with range metadata, use the metadata.
6946     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6947       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6948         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6949 
6950     AAValueConstantRangeImpl::initialize(A);
6951   }
6952 
6953   /// See AbstractAttribute::trackStatistics()
6954   void trackStatistics() const override {
6955     STATS_DECLTRACK_CSRET_ATTR(value_range)
6956   }
6957 };
6958 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6959   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6960       : AAValueConstantRangeFloating(IRP, A) {}
6961 
6962   /// See AbstractAttribute::trackStatistics()
6963   void trackStatistics() const override {
6964     STATS_DECLTRACK_CSARG_ATTR(value_range)
6965   }
6966 };
6967 
6968 const char AAReturnedValues::ID = 0;
6969 const char AANoUnwind::ID = 0;
6970 const char AANoSync::ID = 0;
6971 const char AANoFree::ID = 0;
6972 const char AANonNull::ID = 0;
6973 const char AANoRecurse::ID = 0;
6974 const char AAWillReturn::ID = 0;
6975 const char AAUndefinedBehavior::ID = 0;
6976 const char AANoAlias::ID = 0;
6977 const char AAReachability::ID = 0;
6978 const char AANoReturn::ID = 0;
6979 const char AAIsDead::ID = 0;
6980 const char AADereferenceable::ID = 0;
6981 const char AAAlign::ID = 0;
6982 const char AANoCapture::ID = 0;
6983 const char AAValueSimplify::ID = 0;
6984 const char AAHeapToStack::ID = 0;
6985 const char AAPrivatizablePtr::ID = 0;
6986 const char AAMemoryBehavior::ID = 0;
6987 const char AAMemoryLocation::ID = 0;
6988 const char AAValueConstantRange::ID = 0;
6989 
6990 // Macro magic to create the static generator function for attributes that
6991 // follow the naming scheme.
6992 
6993 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6994   case IRPosition::PK:                                                         \
6995     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6996 
6997 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6998   case IRPosition::PK:                                                         \
6999     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7000     ++NumAAs;                                                                  \
7001     break;
7002 
7003 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7004   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7005     CLASS *AA = nullptr;                                                       \
7006     switch (IRP.getPositionKind()) {                                           \
7007       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7008       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7009       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7010       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7011       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7012       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7013       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7014       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7015     }                                                                          \
7016     return *AA;                                                                \
7017   }
7018 
7019 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7020   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7021     CLASS *AA = nullptr;                                                       \
7022     switch (IRP.getPositionKind()) {                                           \
7023       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7024       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7025       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7026       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7027       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7028       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7029       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7030       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7031     }                                                                          \
7032     return *AA;                                                                \
7033   }
7034 
7035 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7036   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7037     CLASS *AA = nullptr;                                                       \
7038     switch (IRP.getPositionKind()) {                                           \
7039       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7040       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7041       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7042       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7043       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7044       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7045       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7046       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7047     }                                                                          \
7048     return *AA;                                                                \
7049   }
7050 
7051 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7052   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7053     CLASS *AA = nullptr;                                                       \
7054     switch (IRP.getPositionKind()) {                                           \
7055       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7056       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7057       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7058       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7059       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7060       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7061       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7062       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7063     }                                                                          \
7064     return *AA;                                                                \
7065   }
7066 
7067 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7068   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7069     CLASS *AA = nullptr;                                                       \
7070     switch (IRP.getPositionKind()) {                                           \
7071       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7072       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7073       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7074       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7075       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7076       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7077       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7078       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7079     }                                                                          \
7080     return *AA;                                                                \
7081   }
7082 
7083 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7084 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7085 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7086 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7087 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7088 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7089 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7090 
7091 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7092 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7093 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7094 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7095 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7096 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7097 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7098 
7099 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7100 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7101 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7102 
7103 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7104 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7105 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7106 
7107 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7108 
7109 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7110 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7111 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7112 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7113 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7114 #undef SWITCH_PK_CREATE
7115 #undef SWITCH_PK_INV
7116